ngram
listlengths
0
67.8k
[ "= LayersScraper.get_value(building, 'city') province = LayersScraper.get_value(building, 'province') country = LayersScraper.get_value(building, 'country') postal =", "for UofT's buildings. UofT Map is located at http://map.utoronto.ca/. \"\"\" host = 'http://map.utoronto.ca/'", "\"\"\" host = 'http://map.utoronto.ca/' campuses = ['utsg', 'utm', 'utsc'] @staticmethod def scrape(location='.'): \"\"\"Update", "( Buildings.host, 'data/map/', campus ), headers=headers, json=True) return data @staticmethod def get_regions_json(campus): \"\"\"Retrieve", "campus.upper()) Scraper.get(Buildings.host) headers = {'Referer': Buildings.host} data = Scraper.get('%s%s%s' % ( Buildings.host, 'data/map/',", "bs4 import BeautifulSoup from collections import OrderedDict from decimal import * import os", "len(lat_lng) == 2: lat = float(lat_lng[0]) lng = float(lat_lng[1]) polygon = region['points'] doc", "Scraper.save_json(doc, location, _id) Scraper.logger.info('Buildings completed.') @staticmethod def get_map_json(campus): \"\"\"Retrieve the JSON structure from", "('lat', lat), ('lng', lng), ('polygon', polygon) ]) Scraper.save_json(doc, location, _id) Scraper.logger.info('Buildings completed.') @staticmethod", "Scraper.get('%s%s%s' % ( Buildings.host, 'data/map/', campus ), headers=headers, json=True) return data @staticmethod def", "True) street = ' '.join(filter(None, LayersScraper.get_value(building, 'street').split(' '))) city = LayersScraper.get_value(building, 'city') province", "files for this scraper.\"\"\" Scraper.logger.info('Buildings initialized.') for campus in Buildings.campuses: data = Buildings.get_map_json(campus)", "import re class Buildings: \"\"\"A scraper for UofT's buildings. UofT Map is located", "re class Buildings: \"\"\"A scraper for UofT's buildings. 
UofT Map is located at", "= 'http://map.utoronto.ca/' campuses = ['utsg', 'utm', 'utsc'] @staticmethod def scrape(location='.'): \"\"\"Update the local", "Buildings.get_map_json(campus) regions = Buildings.get_regions_json(campus)['buildings'] for building in data['buildings']: _id = building['id'] code =", "LayersScraper.get_value(building, 'province') country = LayersScraper.get_value(building, 'country') postal = LayersScraper.get_value(building, 'postal') polygon = []", "region['id'] == _id: lat_lng = region['center_point'] if lat_lng: lat_lng = lat_lng[1:-2].split(', ') if", "LayersScraper.get_value(building, 'lng', True) street = ' '.join(filter(None, LayersScraper.get_value(building, 'street').split(' '))) city = LayersScraper.get_value(building,", "= building['code'] name = building['title'] short_name = LayersScraper.get_value(building, 'short_name') lat = LayersScraper.get_value(building, 'lat',", "is located at http://map.utoronto.ca/. \"\"\" host = 'http://map.utoronto.ca/' campuses = ['utsg', 'utm', 'utsc']", "= float(lat_lng[0]) lng = float(lat_lng[1]) polygon = region['points'] doc = OrderedDict([ ('id', _id),", "OrderedDict([ ('street', street), ('city', city), ('province', province), ('country', country), ('postal', postal) ])), ('lat',", "('short_name', short_name), ('campus', campus.upper()), ('address', OrderedDict([ ('street', street), ('city', city), ('province', province), ('country',", "<filename>uoftscrapers/scrapers/buildings/__init__.py from ..utils import Scraper, LayersScraper from bs4 import BeautifulSoup from collections import", "_id) Scraper.logger.info('Buildings completed.') @staticmethod def get_map_json(campus): \"\"\"Retrieve the JSON structure from host.\"\"\" Scraper.logger.info('Scraping", "Scraper.logger.info('Scraping %s.' 
% campus.upper()) Scraper.get(Buildings.host) headers = {'Referer': Buildings.host} data = Scraper.get('%s%s%s' %", "initialized.') for campus in Buildings.campuses: data = Buildings.get_map_json(campus) regions = Buildings.get_regions_json(campus)['buildings'] for building", "= LayersScraper.get_value(building, 'postal') polygon = [] for region in regions: if region['id'] ==", "'))) city = LayersScraper.get_value(building, 'city') province = LayersScraper.get_value(building, 'province') country = LayersScraper.get_value(building, 'country')", "os import re class Buildings: \"\"\"A scraper for UofT's buildings. UofT Map is", "2: lat = float(lat_lng[0]) lng = float(lat_lng[1]) polygon = region['points'] doc = OrderedDict([", "{'Referer': Buildings.host} data = Scraper.get('%s%s%s' % ( Buildings.host, 'data/map/', campus ), headers=headers, json=True)", "regions: if region['id'] == _id: lat_lng = region['center_point'] if lat_lng: lat_lng = lat_lng[1:-2].split(',", "lat_lng = lat_lng[1:-2].split(', ') if len(lat_lng) == 2: lat = float(lat_lng[0]) lng =", "decimal import * import os import re class Buildings: \"\"\"A scraper for UofT's", "])), ('lat', lat), ('lng', lng), ('polygon', polygon) ]) Scraper.save_json(doc, location, _id) Scraper.logger.info('Buildings completed.')", "\"\"\"Retrieve the JSON structure from host.\"\"\" Scraper.logger.info('Scraping %s.' % campus.upper()) Scraper.get(Buildings.host) headers =", "%s.' 
% campus.upper()) Scraper.get(Buildings.host) headers = {'Referer': Buildings.host} data = Scraper.get('%s%s%s' % (", "= ['utsg', 'utm', 'utsc'] @staticmethod def scrape(location='.'): \"\"\"Update the local JSON files for", "{'Referer': Buildings.host} data = Scraper.get('%s%s%s' % ( Buildings.host, 'data/regions/', campus ), headers=headers, json=True)", "short_name = LayersScraper.get_value(building, 'short_name') lat = LayersScraper.get_value(building, 'lat', True) lng = LayersScraper.get_value(building, 'lng',", "'http://map.utoronto.ca/' campuses = ['utsg', 'utm', 'utsc'] @staticmethod def scrape(location='.'): \"\"\"Update the local JSON", "LayersScraper.get_value(building, 'city') province = LayersScraper.get_value(building, 'province') country = LayersScraper.get_value(building, 'country') postal = LayersScraper.get_value(building,", "data = Scraper.get('%s%s%s' % ( Buildings.host, 'data/map/', campus ), headers=headers, json=True) return data", "% ( Buildings.host, 'data/map/', campus ), headers=headers, json=True) return data @staticmethod def get_regions_json(campus):", "data['buildings']: _id = building['id'] code = building['code'] name = building['title'] short_name = LayersScraper.get_value(building,", "import os import re class Buildings: \"\"\"A scraper for UofT's buildings. 
UofT Map", "data = Buildings.get_map_json(campus) regions = Buildings.get_regions_json(campus)['buildings'] for building in data['buildings']: _id = building['id']", "True) lng = LayersScraper.get_value(building, 'lng', True) street = ' '.join(filter(None, LayersScraper.get_value(building, 'street').split(' ')))", "city), ('province', province), ('country', country), ('postal', postal) ])), ('lat', lat), ('lng', lng), ('polygon',", "json=True) return data @staticmethod def get_regions_json(campus): \"\"\"Retrieve the JSON structure from host.\"\"\" Scraper.get(Buildings.host)", "this scraper.\"\"\" Scraper.logger.info('Buildings initialized.') for campus in Buildings.campuses: data = Buildings.get_map_json(campus) regions =", "code = building['code'] name = building['title'] short_name = LayersScraper.get_value(building, 'short_name') lat = LayersScraper.get_value(building,", "from host.\"\"\" Scraper.get(Buildings.host) headers = {'Referer': Buildings.host} data = Scraper.get('%s%s%s' % ( Buildings.host,", "= Buildings.get_map_json(campus) regions = Buildings.get_regions_json(campus)['buildings'] for building in data['buildings']: _id = building['id'] code", "= float(lat_lng[1]) polygon = region['points'] doc = OrderedDict([ ('id', _id), ('code', code), ('name',", "= LayersScraper.get_value(building, 'lat', True) lng = LayersScraper.get_value(building, 'lng', True) street = ' '.join(filter(None,", "campus.upper()), ('address', OrderedDict([ ('street', street), ('city', city), ('province', province), ('country', country), ('postal', postal)", "from decimal import * import os import re class Buildings: \"\"\"A scraper for", "campus ), headers=headers, json=True) return data @staticmethod def get_regions_json(campus): \"\"\"Retrieve the JSON structure", "= [] for region in regions: if region['id'] == _id: lat_lng = region['center_point']", "at http://map.utoronto.ca/. 
\"\"\" host = 'http://map.utoronto.ca/' campuses = ['utsg', 'utm', 'utsc'] @staticmethod def", "BeautifulSoup from collections import OrderedDict from decimal import * import os import re", "campuses = ['utsg', 'utm', 'utsc'] @staticmethod def scrape(location='.'): \"\"\"Update the local JSON files", "= building['title'] short_name = LayersScraper.get_value(building, 'short_name') lat = LayersScraper.get_value(building, 'lat', True) lng =", "headers=headers, json=True) return data @staticmethod def get_regions_json(campus): \"\"\"Retrieve the JSON structure from host.\"\"\"", "@staticmethod def scrape(location='.'): \"\"\"Update the local JSON files for this scraper.\"\"\" Scraper.logger.info('Buildings initialized.')", "JSON files for this scraper.\"\"\" Scraper.logger.info('Buildings initialized.') for campus in Buildings.campuses: data =", "doc = OrderedDict([ ('id', _id), ('code', code), ('name', name), ('short_name', short_name), ('campus', campus.upper()),", "== 2: lat = float(lat_lng[0]) lng = float(lat_lng[1]) polygon = region['points'] doc =", "= LayersScraper.get_value(building, 'country') postal = LayersScraper.get_value(building, 'postal') polygon = [] for region in", "buildings. UofT Map is located at http://map.utoronto.ca/. 
\"\"\" host = 'http://map.utoronto.ca/' campuses =", "region['center_point'] if lat_lng: lat_lng = lat_lng[1:-2].split(', ') if len(lat_lng) == 2: lat =", "import Scraper, LayersScraper from bs4 import BeautifulSoup from collections import OrderedDict from decimal", "= region['points'] doc = OrderedDict([ ('id', _id), ('code', code), ('name', name), ('short_name', short_name),", "data = Scraper.get('%s%s%s' % ( Buildings.host, 'data/regions/', campus ), headers=headers, json=True) return data", "location, _id) Scraper.logger.info('Buildings completed.') @staticmethod def get_map_json(campus): \"\"\"Retrieve the JSON structure from host.\"\"\"", "for building in data['buildings']: _id = building['id'] code = building['code'] name = building['title']", "]) Scraper.save_json(doc, location, _id) Scraper.logger.info('Buildings completed.') @staticmethod def get_map_json(campus): \"\"\"Retrieve the JSON structure", "Scraper.logger.info('Buildings initialized.') for campus in Buildings.campuses: data = Buildings.get_map_json(campus) regions = Buildings.get_regions_json(campus)['buildings'] for", "name), ('short_name', short_name), ('campus', campus.upper()), ('address', OrderedDict([ ('street', street), ('city', city), ('province', province),", "Buildings.host} data = Scraper.get('%s%s%s' % ( Buildings.host, 'data/regions/', campus ), headers=headers, json=True) return", "= {'Referer': Buildings.host} data = Scraper.get('%s%s%s' % ( Buildings.host, 'data/regions/', campus ), headers=headers,", "Scraper, LayersScraper from bs4 import BeautifulSoup from collections import OrderedDict from decimal import", "from bs4 import BeautifulSoup from collections import OrderedDict from decimal import * import", "the JSON structure from host.\"\"\" Scraper.logger.info('Scraping %s.' 
% campus.upper()) Scraper.get(Buildings.host) headers = {'Referer':", "lng = float(lat_lng[1]) polygon = region['points'] doc = OrderedDict([ ('id', _id), ('code', code),", "building['code'] name = building['title'] short_name = LayersScraper.get_value(building, 'short_name') lat = LayersScraper.get_value(building, 'lat', True)", "\"\"\"A scraper for UofT's buildings. UofT Map is located at http://map.utoronto.ca/. \"\"\" host", "'postal') polygon = [] for region in regions: if region['id'] == _id: lat_lng", "('code', code), ('name', name), ('short_name', short_name), ('campus', campus.upper()), ('address', OrderedDict([ ('street', street), ('city',", "completed.') @staticmethod def get_map_json(campus): \"\"\"Retrieve the JSON structure from host.\"\"\" Scraper.logger.info('Scraping %s.' %", "('name', name), ('short_name', short_name), ('campus', campus.upper()), ('address', OrderedDict([ ('street', street), ('city', city), ('province',", "OrderedDict from decimal import * import os import re class Buildings: \"\"\"A scraper", "LayersScraper from bs4 import BeautifulSoup from collections import OrderedDict from decimal import *", "..utils import Scraper, LayersScraper from bs4 import BeautifulSoup from collections import OrderedDict from", "polygon = region['points'] doc = OrderedDict([ ('id', _id), ('code', code), ('name', name), ('short_name',", "city = LayersScraper.get_value(building, 'city') province = LayersScraper.get_value(building, 'province') country = LayersScraper.get_value(building, 'country') postal", "if region['id'] == _id: lat_lng = region['center_point'] if lat_lng: lat_lng = lat_lng[1:-2].split(', ')", "from ..utils import Scraper, LayersScraper from bs4 import BeautifulSoup from collections import OrderedDict", "JSON structure from host.\"\"\" Scraper.get(Buildings.host) headers = {'Referer': Buildings.host} data = Scraper.get('%s%s%s' %", "province), ('country', country), ('postal', postal) ])), ('lat', lat), ('lng', lng), ('polygon', polygon) 
])", "get_map_json(campus): \"\"\"Retrieve the JSON structure from host.\"\"\" Scraper.logger.info('Scraping %s.' % campus.upper()) Scraper.get(Buildings.host) headers", "the local JSON files for this scraper.\"\"\" Scraper.logger.info('Buildings initialized.') for campus in Buildings.campuses:", "regions = Buildings.get_regions_json(campus)['buildings'] for building in data['buildings']: _id = building['id'] code = building['code']", "= LayersScraper.get_value(building, 'short_name') lat = LayersScraper.get_value(building, 'lat', True) lng = LayersScraper.get_value(building, 'lng', True)", "campus in Buildings.campuses: data = Buildings.get_map_json(campus) regions = Buildings.get_regions_json(campus)['buildings'] for building in data['buildings']:", "province = LayersScraper.get_value(building, 'province') country = LayersScraper.get_value(building, 'country') postal = LayersScraper.get_value(building, 'postal') polygon", "('campus', campus.upper()), ('address', OrderedDict([ ('street', street), ('city', city), ('province', province), ('country', country), ('postal',", "http://map.utoronto.ca/. \"\"\" host = 'http://map.utoronto.ca/' campuses = ['utsg', 'utm', 'utsc'] @staticmethod def scrape(location='.'):", "_id = building['id'] code = building['code'] name = building['title'] short_name = LayersScraper.get_value(building, 'short_name')", "from host.\"\"\" Scraper.logger.info('Scraping %s.' 
% campus.upper()) Scraper.get(Buildings.host) headers = {'Referer': Buildings.host} data =", "lng = LayersScraper.get_value(building, 'lng', True) street = ' '.join(filter(None, LayersScraper.get_value(building, 'street').split(' '))) city", "building in data['buildings']: _id = building['id'] code = building['code'] name = building['title'] short_name", "LayersScraper.get_value(building, 'postal') polygon = [] for region in regions: if region['id'] == _id:", "'country') postal = LayersScraper.get_value(building, 'postal') polygon = [] for region in regions: if", "OrderedDict([ ('id', _id), ('code', code), ('name', name), ('short_name', short_name), ('campus', campus.upper()), ('address', OrderedDict([", "Scraper.get(Buildings.host) headers = {'Referer': Buildings.host} data = Scraper.get('%s%s%s' % ( Buildings.host, 'data/regions/', campus", "LayersScraper.get_value(building, 'lat', True) lng = LayersScraper.get_value(building, 'lng', True) street = ' '.join(filter(None, LayersScraper.get_value(building,", "region in regions: if region['id'] == _id: lat_lng = region['center_point'] if lat_lng: lat_lng", "('country', country), ('postal', postal) ])), ('lat', lat), ('lng', lng), ('polygon', polygon) ]) Scraper.save_json(doc,", "local JSON files for this scraper.\"\"\" Scraper.logger.info('Buildings initialized.') for campus in Buildings.campuses: data", "host.\"\"\" Scraper.get(Buildings.host) headers = {'Referer': Buildings.host} data = Scraper.get('%s%s%s' % ( Buildings.host, 'data/regions/',", "'utm', 'utsc'] @staticmethod def scrape(location='.'): \"\"\"Update the local JSON files for this scraper.\"\"\"", "lat = float(lat_lng[0]) lng = float(lat_lng[1]) polygon = region['points'] doc = OrderedDict([ ('id',", "@staticmethod def get_regions_json(campus): \"\"\"Retrieve the JSON structure from host.\"\"\" Scraper.get(Buildings.host) headers = {'Referer':", "= LayersScraper.get_value(building, 'lng', True) street = ' '.join(filter(None, 
LayersScraper.get_value(building, 'street').split(' '))) city =", "postal = LayersScraper.get_value(building, 'postal') polygon = [] for region in regions: if region['id']", "('polygon', polygon) ]) Scraper.save_json(doc, location, _id) Scraper.logger.info('Buildings completed.') @staticmethod def get_map_json(campus): \"\"\"Retrieve the", "import * import os import re class Buildings: \"\"\"A scraper for UofT's buildings.", "Buildings: \"\"\"A scraper for UofT's buildings. UofT Map is located at http://map.utoronto.ca/. \"\"\"", "for region in regions: if region['id'] == _id: lat_lng = region['center_point'] if lat_lng:", "('city', city), ('province', province), ('country', country), ('postal', postal) ])), ('lat', lat), ('lng', lng),", "headers = {'Referer': Buildings.host} data = Scraper.get('%s%s%s' % ( Buildings.host, 'data/regions/', campus ),", "in data['buildings']: _id = building['id'] code = building['code'] name = building['title'] short_name =", "building['title'] short_name = LayersScraper.get_value(building, 'short_name') lat = LayersScraper.get_value(building, 'lat', True) lng = LayersScraper.get_value(building,", "if lat_lng: lat_lng = lat_lng[1:-2].split(', ') if len(lat_lng) == 2: lat = float(lat_lng[0])", "_id), ('code', code), ('name', name), ('short_name', short_name), ('campus', campus.upper()), ('address', OrderedDict([ ('street', street),", "short_name), ('campus', campus.upper()), ('address', OrderedDict([ ('street', street), ('city', city), ('province', province), ('country', country),", "headers = {'Referer': Buildings.host} data = Scraper.get('%s%s%s' % ( Buildings.host, 'data/map/', campus ),", "('province', province), ('country', country), ('postal', postal) ])), ('lat', lat), ('lng', lng), ('polygon', polygon)", "= Scraper.get('%s%s%s' % ( Buildings.host, 'data/map/', campus ), headers=headers, json=True) return data @staticmethod", "'data/map/', campus ), headers=headers, json=True) return data @staticmethod def 
get_regions_json(campus): \"\"\"Retrieve the JSON", "' '.join(filter(None, LayersScraper.get_value(building, 'street').split(' '))) city = LayersScraper.get_value(building, 'city') province = LayersScraper.get_value(building, 'province')", "collections import OrderedDict from decimal import * import os import re class Buildings:", "in Buildings.campuses: data = Buildings.get_map_json(campus) regions = Buildings.get_regions_json(campus)['buildings'] for building in data['buildings']: _id", "in regions: if region['id'] == _id: lat_lng = region['center_point'] if lat_lng: lat_lng =", "= region['center_point'] if lat_lng: lat_lng = lat_lng[1:-2].split(', ') if len(lat_lng) == 2: lat", "'province') country = LayersScraper.get_value(building, 'country') postal = LayersScraper.get_value(building, 'postal') polygon = [] for", "= LayersScraper.get_value(building, 'province') country = LayersScraper.get_value(building, 'country') postal = LayersScraper.get_value(building, 'postal') polygon =", "@staticmethod def get_map_json(campus): \"\"\"Retrieve the JSON structure from host.\"\"\" Scraper.logger.info('Scraping %s.' % campus.upper())", "('id', _id), ('code', code), ('name', name), ('short_name', short_name), ('campus', campus.upper()), ('address', OrderedDict([ ('street',", "UofT's buildings. UofT Map is located at http://map.utoronto.ca/. 
\"\"\" host = 'http://map.utoronto.ca/' campuses", "for campus in Buildings.campuses: data = Buildings.get_map_json(campus) regions = Buildings.get_regions_json(campus)['buildings'] for building in", "'street').split(' '))) city = LayersScraper.get_value(building, 'city') province = LayersScraper.get_value(building, 'province') country = LayersScraper.get_value(building,", "def get_regions_json(campus): \"\"\"Retrieve the JSON structure from host.\"\"\" Scraper.get(Buildings.host) headers = {'Referer': Buildings.host}", "polygon) ]) Scraper.save_json(doc, location, _id) Scraper.logger.info('Buildings completed.') @staticmethod def get_map_json(campus): \"\"\"Retrieve the JSON", "located at http://map.utoronto.ca/. \"\"\" host = 'http://map.utoronto.ca/' campuses = ['utsg', 'utm', 'utsc'] @staticmethod", "Buildings.host, 'data/map/', campus ), headers=headers, json=True) return data @staticmethod def get_regions_json(campus): \"\"\"Retrieve the", "polygon = [] for region in regions: if region['id'] == _id: lat_lng =", "Map is located at http://map.utoronto.ca/. 
\"\"\" host = 'http://map.utoronto.ca/' campuses = ['utsg', 'utm',", "'city') province = LayersScraper.get_value(building, 'province') country = LayersScraper.get_value(building, 'country') postal = LayersScraper.get_value(building, 'postal')", "\"\"\"Update the local JSON files for this scraper.\"\"\" Scraper.logger.info('Buildings initialized.') for campus in", "return data @staticmethod def get_regions_json(campus): \"\"\"Retrieve the JSON structure from host.\"\"\" Scraper.get(Buildings.host) headers", "lat_lng[1:-2].split(', ') if len(lat_lng) == 2: lat = float(lat_lng[0]) lng = float(lat_lng[1]) polygon", "['utsg', 'utm', 'utsc'] @staticmethod def scrape(location='.'): \"\"\"Update the local JSON files for this", "import OrderedDict from decimal import * import os import re class Buildings: \"\"\"A", "building['id'] code = building['code'] name = building['title'] short_name = LayersScraper.get_value(building, 'short_name') lat =", "'.join(filter(None, LayersScraper.get_value(building, 'street').split(' '))) city = LayersScraper.get_value(building, 'city') province = LayersScraper.get_value(building, 'province') country", "street), ('city', city), ('province', province), ('country', country), ('postal', postal) ])), ('lat', lat), ('lng',", "country), ('postal', postal) ])), ('lat', lat), ('lng', lng), ('polygon', polygon) ]) Scraper.save_json(doc, location,", "get_regions_json(campus): \"\"\"Retrieve the JSON structure from host.\"\"\" Scraper.get(Buildings.host) headers = {'Referer': Buildings.host} data", "Scraper.get(Buildings.host) headers = {'Referer': Buildings.host} data = Scraper.get('%s%s%s' % ( Buildings.host, 'data/map/', campus", "float(lat_lng[1]) polygon = region['points'] doc = OrderedDict([ ('id', _id), ('code', code), ('name', name),", "scraper.\"\"\" Scraper.logger.info('Buildings initialized.') for campus in Buildings.campuses: data = Buildings.get_map_json(campus) regions = Buildings.get_regions_json(campus)['buildings']", "') if 
len(lat_lng) == 2: lat = float(lat_lng[0]) lng = float(lat_lng[1]) polygon =", "\"\"\"Retrieve the JSON structure from host.\"\"\" Scraper.get(Buildings.host) headers = {'Referer': Buildings.host} data =", "structure from host.\"\"\" Scraper.get(Buildings.host) headers = {'Referer': Buildings.host} data = Scraper.get('%s%s%s' % (", "lat_lng = region['center_point'] if lat_lng: lat_lng = lat_lng[1:-2].split(', ') if len(lat_lng) == 2:", "= building['id'] code = building['code'] name = building['title'] short_name = LayersScraper.get_value(building, 'short_name') lat", "if len(lat_lng) == 2: lat = float(lat_lng[0]) lng = float(lat_lng[1]) polygon = region['points']", "_id: lat_lng = region['center_point'] if lat_lng: lat_lng = lat_lng[1:-2].split(', ') if len(lat_lng) ==", "Buildings.campuses: data = Buildings.get_map_json(campus) regions = Buildings.get_regions_json(campus)['buildings'] for building in data['buildings']: _id =", "Buildings.host} data = Scraper.get('%s%s%s' % ( Buildings.host, 'data/map/', campus ), headers=headers, json=True) return", "LayersScraper.get_value(building, 'country') postal = LayersScraper.get_value(building, 'postal') polygon = [] for region in regions:", "class Buildings: \"\"\"A scraper for UofT's buildings. UofT Map is located at http://map.utoronto.ca/.", "structure from host.\"\"\" Scraper.logger.info('Scraping %s.' 
% campus.upper()) Scraper.get(Buildings.host) headers = {'Referer': Buildings.host} data", "lat = LayersScraper.get_value(building, 'lat', True) lng = LayersScraper.get_value(building, 'lng', True) street = '", "import BeautifulSoup from collections import OrderedDict from decimal import * import os import", "def scrape(location='.'): \"\"\"Update the local JSON files for this scraper.\"\"\" Scraper.logger.info('Buildings initialized.') for", "== _id: lat_lng = region['center_point'] if lat_lng: lat_lng = lat_lng[1:-2].split(', ') if len(lat_lng)", "('street', street), ('city', city), ('province', province), ('country', country), ('postal', postal) ])), ('lat', lat),", "host.\"\"\" Scraper.logger.info('Scraping %s.' % campus.upper()) Scraper.get(Buildings.host) headers = {'Referer': Buildings.host} data = Scraper.get('%s%s%s'", "lat_lng: lat_lng = lat_lng[1:-2].split(', ') if len(lat_lng) == 2: lat = float(lat_lng[0]) lng", "code), ('name', name), ('short_name', short_name), ('campus', campus.upper()), ('address', OrderedDict([ ('street', street), ('city', city),", "the JSON structure from host.\"\"\" Scraper.get(Buildings.host) headers = {'Referer': Buildings.host} data = Scraper.get('%s%s%s'", "country = LayersScraper.get_value(building, 'country') postal = LayersScraper.get_value(building, 'postal') polygon = [] for region", "LayersScraper.get_value(building, 'street').split(' '))) city = LayersScraper.get_value(building, 'city') province = LayersScraper.get_value(building, 'province') country =", "UofT Map is located at http://map.utoronto.ca/. 
\"\"\" host = 'http://map.utoronto.ca/' campuses = ['utsg',", "for this scraper.\"\"\" Scraper.logger.info('Buildings initialized.') for campus in Buildings.campuses: data = Buildings.get_map_json(campus) regions", "'short_name') lat = LayersScraper.get_value(building, 'lat', True) lng = LayersScraper.get_value(building, 'lng', True) street =", "scrape(location='.'): \"\"\"Update the local JSON files for this scraper.\"\"\" Scraper.logger.info('Buildings initialized.') for campus", "= lat_lng[1:-2].split(', ') if len(lat_lng) == 2: lat = float(lat_lng[0]) lng = float(lat_lng[1])", "LayersScraper.get_value(building, 'short_name') lat = LayersScraper.get_value(building, 'lat', True) lng = LayersScraper.get_value(building, 'lng', True) street", "from collections import OrderedDict from decimal import * import os import re class", "% campus.upper()) Scraper.get(Buildings.host) headers = {'Referer': Buildings.host} data = Scraper.get('%s%s%s' % ( Buildings.host,", "name = building['title'] short_name = LayersScraper.get_value(building, 'short_name') lat = LayersScraper.get_value(building, 'lat', True) lng", "), headers=headers, json=True) return data @staticmethod def get_regions_json(campus): \"\"\"Retrieve the JSON structure from", "float(lat_lng[0]) lng = float(lat_lng[1]) polygon = region['points'] doc = OrderedDict([ ('id', _id), ('code',", "= OrderedDict([ ('id', _id), ('code', code), ('name', name), ('short_name', short_name), ('campus', campus.upper()), ('address',", "* import os import re class Buildings: \"\"\"A scraper for UofT's buildings. 
UofT", "('lng', lng), ('polygon', polygon) ]) Scraper.save_json(doc, location, _id) Scraper.logger.info('Buildings completed.') @staticmethod def get_map_json(campus):", "street = ' '.join(filter(None, LayersScraper.get_value(building, 'street').split(' '))) city = LayersScraper.get_value(building, 'city') province =", "postal) ])), ('lat', lat), ('lng', lng), ('polygon', polygon) ]) Scraper.save_json(doc, location, _id) Scraper.logger.info('Buildings", "('postal', postal) ])), ('lat', lat), ('lng', lng), ('polygon', polygon) ]) Scraper.save_json(doc, location, _id)", "= ' '.join(filter(None, LayersScraper.get_value(building, 'street').split(' '))) city = LayersScraper.get_value(building, 'city') province = LayersScraper.get_value(building,", "def get_map_json(campus): \"\"\"Retrieve the JSON structure from host.\"\"\" Scraper.logger.info('Scraping %s.' % campus.upper()) Scraper.get(Buildings.host)", "= {'Referer': Buildings.host} data = Scraper.get('%s%s%s' % ( Buildings.host, 'data/map/', campus ), headers=headers,", "('address', OrderedDict([ ('street', street), ('city', city), ('province', province), ('country', country), ('postal', postal) ])),", "lng), ('polygon', polygon) ]) Scraper.save_json(doc, location, _id) Scraper.logger.info('Buildings completed.') @staticmethod def get_map_json(campus): \"\"\"Retrieve", "[] for region in regions: if region['id'] == _id: lat_lng = region['center_point'] if", "Buildings.get_regions_json(campus)['buildings'] for building in data['buildings']: _id = building['id'] code = building['code'] name =", "region['points'] doc = OrderedDict([ ('id', _id), ('code', code), ('name', name), ('short_name', short_name), ('campus',", "data @staticmethod def get_regions_json(campus): \"\"\"Retrieve the JSON structure from host.\"\"\" Scraper.get(Buildings.host) headers =", "Scraper.logger.info('Buildings completed.') @staticmethod def get_map_json(campus): \"\"\"Retrieve the JSON structure from host.\"\"\" Scraper.logger.info('Scraping 
%s.'", "= Buildings.get_regions_json(campus)['buildings'] for building in data['buildings']: _id = building['id'] code = building['code'] name", "'lat', True) lng = LayersScraper.get_value(building, 'lng', True) street = ' '.join(filter(None, LayersScraper.get_value(building, 'street').split('", "'lng', True) street = ' '.join(filter(None, LayersScraper.get_value(building, 'street').split(' '))) city = LayersScraper.get_value(building, 'city')", "lat), ('lng', lng), ('polygon', polygon) ]) Scraper.save_json(doc, location, _id) Scraper.logger.info('Buildings completed.') @staticmethod def", "scraper for UofT's buildings. UofT Map is located at http://map.utoronto.ca/. \"\"\" host =", "'utsc'] @staticmethod def scrape(location='.'): \"\"\"Update the local JSON files for this scraper.\"\"\" Scraper.logger.info('Buildings", "JSON structure from host.\"\"\" Scraper.logger.info('Scraping %s.' % campus.upper()) Scraper.get(Buildings.host) headers = {'Referer': Buildings.host}", "host = 'http://map.utoronto.ca/' campuses = ['utsg', 'utm', 'utsc'] @staticmethod def scrape(location='.'): \"\"\"Update the" ]
[ "\\ InventoryArmor, InventoryTool, InventoryWeapon, InventoryWondrousItem, SkillProficiency from .character_class import CharacterClass, Archetype, FeaturesAtLevel, \\", "Characters \"\"\" from .ancestry import Ancestry, SubAncestry from .background import Background from .character", "from .ancestry import Ancestry, SubAncestry from .background import Background from .character import Character,", "SkillProficiency from .character_class import CharacterClass, Archetype, FeaturesAtLevel, \\ SpellsKnownAtLevel, SpellSlotsAtLevel from .feature import", "Background from .character import Character, ClassAndLevel, InventoryAdventuringGear, \\ InventoryArmor, InventoryTool, InventoryWeapon, InventoryWondrousItem, SkillProficiency", "from .character import Character, ClassAndLevel, InventoryAdventuringGear, \\ InventoryArmor, InventoryTool, InventoryWeapon, InventoryWondrousItem, SkillProficiency from", ".character import Character, ClassAndLevel, InventoryAdventuringGear, \\ InventoryArmor, InventoryTool, InventoryWeapon, InventoryWondrousItem, SkillProficiency from .character_class", "ClassAndLevel, InventoryAdventuringGear, \\ InventoryArmor, InventoryTool, InventoryWeapon, InventoryWondrousItem, SkillProficiency from .character_class import CharacterClass, Archetype,", "Character, ClassAndLevel, InventoryAdventuringGear, \\ InventoryArmor, InventoryTool, InventoryWeapon, InventoryWondrousItem, SkillProficiency from .character_class import CharacterClass,", ".ancestry import Ancestry, SubAncestry from .background import Background from .character import Character, ClassAndLevel,", "<gh_stars>0 \"\"\" Models for Characters \"\"\" from .ancestry import Ancestry, SubAncestry from .background", "Ancestry, SubAncestry from .background import Background from .character import Character, ClassAndLevel, InventoryAdventuringGear, \\", "SubAncestry from .background import Background from .character import Character, ClassAndLevel, InventoryAdventuringGear, \\ 
InventoryArmor,", "import Ancestry, SubAncestry from .background import Background from .character import Character, ClassAndLevel, InventoryAdventuringGear,", "InventoryArmor, InventoryTool, InventoryWeapon, InventoryWondrousItem, SkillProficiency from .character_class import CharacterClass, Archetype, FeaturesAtLevel, \\ SpellsKnownAtLevel,", "InventoryAdventuringGear, \\ InventoryArmor, InventoryTool, InventoryWeapon, InventoryWondrousItem, SkillProficiency from .character_class import CharacterClass, Archetype, FeaturesAtLevel,", "\"\"\" Models for Characters \"\"\" from .ancestry import Ancestry, SubAncestry from .background import", "from .background import Background from .character import Character, ClassAndLevel, InventoryAdventuringGear, \\ InventoryArmor, InventoryTool,", "from .character_class import CharacterClass, Archetype, FeaturesAtLevel, \\ SpellsKnownAtLevel, SpellSlotsAtLevel from .feature import Feature", "Models for Characters \"\"\" from .ancestry import Ancestry, SubAncestry from .background import Background", "import Character, ClassAndLevel, InventoryAdventuringGear, \\ InventoryArmor, InventoryTool, InventoryWeapon, InventoryWondrousItem, SkillProficiency from .character_class import", "import Background from .character import Character, ClassAndLevel, InventoryAdventuringGear, \\ InventoryArmor, InventoryTool, InventoryWeapon, InventoryWondrousItem,", "InventoryWeapon, InventoryWondrousItem, SkillProficiency from .character_class import CharacterClass, Archetype, FeaturesAtLevel, \\ SpellsKnownAtLevel, SpellSlotsAtLevel from", "InventoryTool, InventoryWeapon, InventoryWondrousItem, SkillProficiency from .character_class import CharacterClass, Archetype, FeaturesAtLevel, \\ SpellsKnownAtLevel, SpellSlotsAtLevel", "\"\"\" from .ancestry import Ancestry, SubAncestry from .background import Background from .character import", ".background import Background from .character import Character, ClassAndLevel, InventoryAdventuringGear, \\ 
InventoryArmor, InventoryTool, InventoryWeapon,", "for Characters \"\"\" from .ancestry import Ancestry, SubAncestry from .background import Background from", "InventoryWondrousItem, SkillProficiency from .character_class import CharacterClass, Archetype, FeaturesAtLevel, \\ SpellsKnownAtLevel, SpellSlotsAtLevel from .feature" ]
[ "print(\"class_info=[\",end='') for i in class_list: if len(i)>=1: print(i,('' if(i==class_list[5]) else ','),end='') num+=1 print(\"]\")", "from bs4 import BeautifulSoup def sigleOrDoubel(str0): num=2; if str0.find('单')!=-1: num=1 elif str0.find('每')!=-1: num=0", "html -> text #reglex import re #import BeautifulSoup from bs4 import BeautifulSoup def", "len(info)==7: #0=1-16, 1=single(1-16), 2=double(1-16) #[flag,time_info,class_name,class_location,week_range,SigleorDouble] class_list[num].append([1,timeinfo,info[0][0:info[0].find('(',1)],info[4],re.match(r'[0-9]-[0-9][0-9]',info[6]).group(),sigleOrDoubel(info[6])]) num+=1 print(\"class_info=[\",end='') for i in class_list: if", "__name__ == \"__main__\": #dealwith the printf soup = BeautifulSoup(open(\"schedule.html\",\"r\",encoding='GBK')) #print(soup.prettify()) tbody=soup.find_all(\"tbody\") all_tr=tbody[0].find_all('tr') class_list=[[]", "num==0: timeinfo=info[2].split('-') if tr['class'][0]=='odd': for i in range(1,8): if len(class_list[i])!=0 and class_list[i][-1][0]==1: class_list[i][-1][0]=0", "timeinfo=info[2].split('-') if tr['class'][0]=='odd': for i in range(1,8): if len(class_list[i])!=0 and class_list[i][-1][0]==1: class_list[i][-1][0]=0 class_list[i][-1][1][1]=timeinfo[1]", "if __name__ == \"__main__\": #dealwith the printf soup = BeautifulSoup(open(\"schedule.html\",\"r\",encoding='GBK')) #print(soup.prettify()) tbody=soup.find_all(\"tbody\") all_tr=tbody[0].find_all('tr')", "<reponame>Mirasire/dotfiles #!/bin/python #feature: html -> text #reglex import re #import BeautifulSoup from bs4", "range(10)] for tr in all_tr[0:len(all_tr)-1]: all_td=tr.find_all('td') num=0 for td in all_td: info=td.contents; if", "class_list[num].append([1,timeinfo,info[0][0:info[0].find('(',1)],info[4],re.match(r'[0-9]-[0-9][0-9]',info[6]).group(),sigleOrDoubel(info[6])]) num+=1 print(\"class_info=[\",end='') for i in class_list: if len(i)>=1: print(i,('' if(i==class_list[5]) else ','),end='')", "for tr in 
all_tr[0:len(all_tr)-1]: all_td=tr.find_all('td') num=0 for td in all_td: info=td.contents; if num==0:", "elif str0.find('每')!=-1: num=0 return num if __name__ == \"__main__\": #dealwith the printf soup", "2=double(1-16) #[flag,time_info,class_name,class_location,week_range,SigleorDouble] class_list[num].append([1,timeinfo,info[0][0:info[0].find('(',1)],info[4],re.match(r'[0-9]-[0-9][0-9]',info[6]).group(),sigleOrDoubel(info[6])]) num+=1 print(\"class_info=[\",end='') for i in class_list: if len(i)>=1: print(i,('' if(i==class_list[5])", "bs4 import BeautifulSoup def sigleOrDoubel(str0): num=2; if str0.find('单')!=-1: num=1 elif str0.find('每')!=-1: num=0 return", "class_list[i][-1][0]==1: class_list[i][-1][0]=0 class_list[i][-1][1][1]=timeinfo[1] elif len(info)==7: #0=1-16, 1=single(1-16), 2=double(1-16) #[flag,time_info,class_name,class_location,week_range,SigleorDouble] class_list[num].append([1,timeinfo,info[0][0:info[0].find('(',1)],info[4],re.match(r'[0-9]-[0-9][0-9]',info[6]).group(),sigleOrDoubel(info[6])]) num+=1 print(\"class_info=[\",end='') for", "all_td=tr.find_all('td') num=0 for td in all_td: info=td.contents; if num==0: timeinfo=info[2].split('-') if tr['class'][0]=='odd': for", "\"__main__\": #dealwith the printf soup = BeautifulSoup(open(\"schedule.html\",\"r\",encoding='GBK')) #print(soup.prettify()) tbody=soup.find_all(\"tbody\") all_tr=tbody[0].find_all('tr') class_list=[[] for i", "len(class_list[i])!=0 and class_list[i][-1][0]==1: class_list[i][-1][0]=0 class_list[i][-1][1][1]=timeinfo[1] elif len(info)==7: #0=1-16, 1=single(1-16), 2=double(1-16) #[flag,time_info,class_name,class_location,week_range,SigleorDouble] class_list[num].append([1,timeinfo,info[0][0:info[0].find('(',1)],info[4],re.match(r'[0-9]-[0-9][0-9]',info[6]).group(),sigleOrDoubel(info[6])]) num+=1", "class_list[i][-1][0]=0 class_list[i][-1][1][1]=timeinfo[1] elif len(info)==7: #0=1-16, 1=single(1-16), 2=double(1-16) 
#[flag,time_info,class_name,class_location,week_range,SigleorDouble] class_list[num].append([1,timeinfo,info[0][0:info[0].find('(',1)],info[4],re.match(r'[0-9]-[0-9][0-9]',info[6]).group(),sigleOrDoubel(info[6])]) num+=1 print(\"class_info=[\",end='') for i", "BeautifulSoup def sigleOrDoubel(str0): num=2; if str0.find('单')!=-1: num=1 elif str0.find('每')!=-1: num=0 return num if", "#import BeautifulSoup from bs4 import BeautifulSoup def sigleOrDoubel(str0): num=2; if str0.find('单')!=-1: num=1 elif", "BeautifulSoup(open(\"schedule.html\",\"r\",encoding='GBK')) #print(soup.prettify()) tbody=soup.find_all(\"tbody\") all_tr=tbody[0].find_all('tr') class_list=[[] for i in range(10)] for tr in all_tr[0:len(all_tr)-1]:", "1=single(1-16), 2=double(1-16) #[flag,time_info,class_name,class_location,week_range,SigleorDouble] class_list[num].append([1,timeinfo,info[0][0:info[0].find('(',1)],info[4],re.match(r'[0-9]-[0-9][0-9]',info[6]).group(),sigleOrDoubel(info[6])]) num+=1 print(\"class_info=[\",end='') for i in class_list: if len(i)>=1: print(i,(''", "sigleOrDoubel(str0): num=2; if str0.find('单')!=-1: num=1 elif str0.find('每')!=-1: num=0 return num if __name__ ==", "return num if __name__ == \"__main__\": #dealwith the printf soup = BeautifulSoup(open(\"schedule.html\",\"r\",encoding='GBK')) #print(soup.prettify())", "soup = BeautifulSoup(open(\"schedule.html\",\"r\",encoding='GBK')) #print(soup.prettify()) tbody=soup.find_all(\"tbody\") all_tr=tbody[0].find_all('tr') class_list=[[] for i in range(10)] for tr", "text #reglex import re #import BeautifulSoup from bs4 import BeautifulSoup def sigleOrDoubel(str0): num=2;", "in all_tr[0:len(all_tr)-1]: all_td=tr.find_all('td') num=0 for td in all_td: info=td.contents; if num==0: timeinfo=info[2].split('-') if", "all_tr[0:len(all_tr)-1]: all_td=tr.find_all('td') num=0 for td in all_td: info=td.contents; if num==0: timeinfo=info[2].split('-') if tr['class'][0]=='odd':", "info=td.contents; if num==0: timeinfo=info[2].split('-') if 
tr['class'][0]=='odd': for i in range(1,8): if len(class_list[i])!=0 and", "if len(class_list[i])!=0 and class_list[i][-1][0]==1: class_list[i][-1][0]=0 class_list[i][-1][1][1]=timeinfo[1] elif len(info)==7: #0=1-16, 1=single(1-16), 2=double(1-16) #[flag,time_info,class_name,class_location,week_range,SigleorDouble] class_list[num].append([1,timeinfo,info[0][0:info[0].find('(',1)],info[4],re.match(r'[0-9]-[0-9][0-9]',info[6]).group(),sigleOrDoubel(info[6])])", "== \"__main__\": #dealwith the printf soup = BeautifulSoup(open(\"schedule.html\",\"r\",encoding='GBK')) #print(soup.prettify()) tbody=soup.find_all(\"tbody\") all_tr=tbody[0].find_all('tr') class_list=[[] for", "= BeautifulSoup(open(\"schedule.html\",\"r\",encoding='GBK')) #print(soup.prettify()) tbody=soup.find_all(\"tbody\") all_tr=tbody[0].find_all('tr') class_list=[[] for i in range(10)] for tr in", "#print(soup.prettify()) tbody=soup.find_all(\"tbody\") all_tr=tbody[0].find_all('tr') class_list=[[] for i in range(10)] for tr in all_tr[0:len(all_tr)-1]: all_td=tr.find_all('td')", "#[flag,time_info,class_name,class_location,week_range,SigleorDouble] class_list[num].append([1,timeinfo,info[0][0:info[0].find('(',1)],info[4],re.match(r'[0-9]-[0-9][0-9]',info[6]).group(),sigleOrDoubel(info[6])]) num+=1 print(\"class_info=[\",end='') for i in class_list: if len(i)>=1: print(i,('' if(i==class_list[5]) else", "in range(1,8): if len(class_list[i])!=0 and class_list[i][-1][0]==1: class_list[i][-1][0]=0 class_list[i][-1][1][1]=timeinfo[1] elif len(info)==7: #0=1-16, 1=single(1-16), 2=double(1-16)", "class_list=[[] for i in range(10)] for tr in all_tr[0:len(all_tr)-1]: all_td=tr.find_all('td') num=0 for td", "num=0 for td in all_td: info=td.contents; if num==0: timeinfo=info[2].split('-') if tr['class'][0]=='odd': for i", "i in range(1,8): if len(class_list[i])!=0 and class_list[i][-1][0]==1: class_list[i][-1][0]=0 class_list[i][-1][1][1]=timeinfo[1] elif len(info)==7: #0=1-16, 1=single(1-16),", "tr in 
all_tr[0:len(all_tr)-1]: all_td=tr.find_all('td') num=0 for td in all_td: info=td.contents; if num==0: timeinfo=info[2].split('-')", "tr['class'][0]=='odd': for i in range(1,8): if len(class_list[i])!=0 and class_list[i][-1][0]==1: class_list[i][-1][0]=0 class_list[i][-1][1][1]=timeinfo[1] elif len(info)==7:", "if num==0: timeinfo=info[2].split('-') if tr['class'][0]=='odd': for i in range(1,8): if len(class_list[i])!=0 and class_list[i][-1][0]==1:", "def sigleOrDoubel(str0): num=2; if str0.find('单')!=-1: num=1 elif str0.find('每')!=-1: num=0 return num if __name__", "num=0 return num if __name__ == \"__main__\": #dealwith the printf soup = BeautifulSoup(open(\"schedule.html\",\"r\",encoding='GBK'))", "num=2; if str0.find('单')!=-1: num=1 elif str0.find('每')!=-1: num=0 return num if __name__ == \"__main__\":", "elif len(info)==7: #0=1-16, 1=single(1-16), 2=double(1-16) #[flag,time_info,class_name,class_location,week_range,SigleorDouble] class_list[num].append([1,timeinfo,info[0][0:info[0].find('(',1)],info[4],re.match(r'[0-9]-[0-9][0-9]',info[6]).group(),sigleOrDoubel(info[6])]) num+=1 print(\"class_info=[\",end='') for i in class_list:", "td in all_td: info=td.contents; if num==0: timeinfo=info[2].split('-') if tr['class'][0]=='odd': for i in range(1,8):", "for td in all_td: info=td.contents; if num==0: timeinfo=info[2].split('-') if tr['class'][0]=='odd': for i in", "#!/bin/python #feature: html -> text #reglex import re #import BeautifulSoup from bs4 import", "for i in range(1,8): if len(class_list[i])!=0 and class_list[i][-1][0]==1: class_list[i][-1][0]=0 class_list[i][-1][1][1]=timeinfo[1] elif len(info)==7: #0=1-16,", "printf soup = BeautifulSoup(open(\"schedule.html\",\"r\",encoding='GBK')) #print(soup.prettify()) tbody=soup.find_all(\"tbody\") all_tr=tbody[0].find_all('tr') class_list=[[] for i in range(10)] for", "if tr['class'][0]=='odd': for i in range(1,8): if len(class_list[i])!=0 and class_list[i][-1][0]==1: class_list[i][-1][0]=0 
class_list[i][-1][1][1]=timeinfo[1] elif", "all_tr=tbody[0].find_all('tr') class_list=[[] for i in range(10)] for tr in all_tr[0:len(all_tr)-1]: all_td=tr.find_all('td') num=0 for", "#feature: html -> text #reglex import re #import BeautifulSoup from bs4 import BeautifulSoup", "import BeautifulSoup def sigleOrDoubel(str0): num=2; if str0.find('单')!=-1: num=1 elif str0.find('每')!=-1: num=0 return num", "range(1,8): if len(class_list[i])!=0 and class_list[i][-1][0]==1: class_list[i][-1][0]=0 class_list[i][-1][1][1]=timeinfo[1] elif len(info)==7: #0=1-16, 1=single(1-16), 2=double(1-16) #[flag,time_info,class_name,class_location,week_range,SigleorDouble]", "all_td: info=td.contents; if num==0: timeinfo=info[2].split('-') if tr['class'][0]=='odd': for i in range(1,8): if len(class_list[i])!=0", "if str0.find('单')!=-1: num=1 elif str0.find('每')!=-1: num=0 return num if __name__ == \"__main__\": #dealwith", "re #import BeautifulSoup from bs4 import BeautifulSoup def sigleOrDoubel(str0): num=2; if str0.find('单')!=-1: num=1", "#reglex import re #import BeautifulSoup from bs4 import BeautifulSoup def sigleOrDoubel(str0): num=2; if", "in range(10)] for tr in all_tr[0:len(all_tr)-1]: all_td=tr.find_all('td') num=0 for td in all_td: info=td.contents;", "class_list[i][-1][1][1]=timeinfo[1] elif len(info)==7: #0=1-16, 1=single(1-16), 2=double(1-16) #[flag,time_info,class_name,class_location,week_range,SigleorDouble] class_list[num].append([1,timeinfo,info[0][0:info[0].find('(',1)],info[4],re.match(r'[0-9]-[0-9][0-9]',info[6]).group(),sigleOrDoubel(info[6])]) num+=1 print(\"class_info=[\",end='') for i in", "and class_list[i][-1][0]==1: class_list[i][-1][0]=0 class_list[i][-1][1][1]=timeinfo[1] elif len(info)==7: #0=1-16, 1=single(1-16), 2=double(1-16) #[flag,time_info,class_name,class_location,week_range,SigleorDouble] class_list[num].append([1,timeinfo,info[0][0:info[0].find('(',1)],info[4],re.match(r'[0-9]-[0-9][0-9]',info[6]).group(),sigleOrDoubel(info[6])]) num+=1 
print(\"class_info=[\",end='')", "in all_td: info=td.contents; if num==0: timeinfo=info[2].split('-') if tr['class'][0]=='odd': for i in range(1,8): if", "import re #import BeautifulSoup from bs4 import BeautifulSoup def sigleOrDoubel(str0): num=2; if str0.find('单')!=-1:", "#0=1-16, 1=single(1-16), 2=double(1-16) #[flag,time_info,class_name,class_location,week_range,SigleorDouble] class_list[num].append([1,timeinfo,info[0][0:info[0].find('(',1)],info[4],re.match(r'[0-9]-[0-9][0-9]',info[6]).group(),sigleOrDoubel(info[6])]) num+=1 print(\"class_info=[\",end='') for i in class_list: if len(i)>=1:", "num+=1 print(\"class_info=[\",end='') for i in class_list: if len(i)>=1: print(i,('' if(i==class_list[5]) else ','),end='') num+=1", "for i in range(10)] for tr in all_tr[0:len(all_tr)-1]: all_td=tr.find_all('td') num=0 for td in", "num=1 elif str0.find('每')!=-1: num=0 return num if __name__ == \"__main__\": #dealwith the printf", "the printf soup = BeautifulSoup(open(\"schedule.html\",\"r\",encoding='GBK')) #print(soup.prettify()) tbody=soup.find_all(\"tbody\") all_tr=tbody[0].find_all('tr') class_list=[[] for i in range(10)]", "#dealwith the printf soup = BeautifulSoup(open(\"schedule.html\",\"r\",encoding='GBK')) #print(soup.prettify()) tbody=soup.find_all(\"tbody\") all_tr=tbody[0].find_all('tr') class_list=[[] for i in", "tbody=soup.find_all(\"tbody\") all_tr=tbody[0].find_all('tr') class_list=[[] for i in range(10)] for tr in all_tr[0:len(all_tr)-1]: all_td=tr.find_all('td') num=0", "str0.find('单')!=-1: num=1 elif str0.find('每')!=-1: num=0 return num if __name__ == \"__main__\": #dealwith the", "str0.find('每')!=-1: num=0 return num if __name__ == \"__main__\": #dealwith the printf soup =", "-> text #reglex import re #import BeautifulSoup from bs4 import BeautifulSoup def sigleOrDoubel(str0):", "num if __name__ == \"__main__\": #dealwith the printf soup = BeautifulSoup(open(\"schedule.html\",\"r\",encoding='GBK')) #print(soup.prettify()) 
tbody=soup.find_all(\"tbody\")", "BeautifulSoup from bs4 import BeautifulSoup def sigleOrDoubel(str0): num=2; if str0.find('单')!=-1: num=1 elif str0.find('每')!=-1:", "i in range(10)] for tr in all_tr[0:len(all_tr)-1]: all_td=tr.find_all('td') num=0 for td in all_td:" ]
[ "address. Need to provide this when doing an Entrez search and fetch\") parser.add_option(\"-d\",", "#!/usr/bin/python # Retrieve FASTA sequence from NCBI based on an Entrez query. #", "return - 4 if not options.info_file: print \"Please specify the download info file", "parser = OptionParser(usage=usage) parser.add_option(\"-m\", \"--email\", dest=\"email\", help=\"Email address. Need to provide this when", "else: # Continue a previous download email = options.email database = options.database web_env", "separately. Sort it out later. Not to crucial. else: print \"Continue a previous", "optparse import OptionParser from Bio import Entrez def main(): usage = \"usage: %prog", "search or fetch (-m EMAIL)\" return - 6 if not options.database: print \"Please", "rettype=\"fasta\", retstart=start, retmax=BATCH_SIZE, webenv=web_env, query_key=query_key) data = fetch_handle.read() except Exception, e: info_file_fd =", "to info_file info_file_fd.write('Number of sequences in original query: %d\\n' % nr_seq_query) info_file_fd.write('Number of", "of the arguments provided. Currently not working because we have 2 options #", "have 2 options # (1) Start new download and (2) Continue previous download.", "query_key = options.query_key start_pos = int(options.start_pos); # should check if start position is", "web_env and query_key for the search.\") (options, args) = parser.parse_args() if not options.continue_download:", "Continue previous download. Need to handle these separately. Sort it out later. 
Not", "output file (-f FASTA_FILE)\" return - 4 if not options.info_file: print \"Please specify", "search results web_env = results[\"WebEnv\"] query_key = results[\"QueryKey\"] # Write query specific info", "info_file_fd.write('Download sequences from position: %d\\n' % 0) info_file_fd.write('web_env: %s\\n' % web_env) info_file_fd.write('query_key: %s\\n'%", "# do not do this test gi_list it is always 20 # Get", "info_file_fd.write('web_env: %s\\n' % web_env) info_file_fd.write('query_key: %s\\n'% query_key) info_file_fd.write('Downloading sequences in batches of %d\\n'", "of sequences in original query (-n NR_SEQ_QUERY)\" return - 11 if not options.fasta_file:", "# to continue a previous download # entrez_download_fasta.py -m <EMAIL> -d nucleotide -c", "will continue a previous download. User need to provide the WEB_ENV, QUERY_KEY and", "previous download. Need to handle these separately. Sort it out later. Not to", "- 1 if not options.database: print \"Please specify the database to fetch info", "from (-d DATABASE)\" return - 7 if not options.web_env: print \"Please specify the", "the web_env and query_key for the search.\") (options, args) = parser.parse_args() if not", "exit anymore fetch_handle.close() fasta_file_fd.write(data) info_file_fd = open(info_file, \"a\") info_file_fd.write('Downloaded sequence %i to %i\\n'", "\"Cypripedioideae[Orgn] AND matK[Gene]\" -f out.fasta -i download_info.txt # to continue a previous download", "help=\"Please provide the number of sequences found in the original query.\") parser.add_option(\"-f\", \"--fasta_file\",", "\"Please specify the FASTA output file (-f FASTA_FILE)\" return - 12 if not", "WEB_ENV, QUERY_KEY and SEQ_START\") parser.add_option(\"-w\", \"--web_env\", dest=\"web_env\", help=\"Please provide the previous web_env.\") parser.add_option(\"-q\",", "FASTA sequence from NCBI based on an Entrez query. 
# to start a", "retstart=start, retmax=BATCH_SIZE, webenv=web_env, query_key=query_key) data = fetch_handle.read() except Exception, e: info_file_fd = open(info_file,", "download # entrez_download_fasta.py -m <EMAIL> -d nucleotide -c -w \"NCID_1_38065753_192.168.127.12_9001_1300878409_78339627\" -q 1 -s", "\"Please specify the number of sequences in original query (-n NR_SEQ_QUERY)\" return -", "check if start position is a integer nr_seq_query = int(options.nr_seq_query); # should check", "provide the number of sequences found in the original query.\") parser.add_option(\"-f\", \"--fasta_file\", dest=\"fasta_file\",", "%d\\n' % nr_seq_query) info_file_fd.write('Number of sequences to be dowloaded: %d\\n' % nr_seq_query) info_file_fd.write('Download", "previous web_env (-w WEB_ENV)\" return - 8 if not options.query_key: print \"Please specify", "FASTA_FILE -i INFO_FILE\" parser = OptionParser(usage=usage) parser.add_option(\"-m\", \"--email\", dest=\"email\", help=\"Email address. Need to", "else: print \"Continue a previous download...\" if not options.email: print \"Please specify an", "% entrez_query) try: handle = Entrez.esearch(db=database,term=entrez_query, usehistory='y') results = Entrez.read(handle) handle.close() except Exception,", "this test gi_list it is always 20 # Get web_env and query_key from", "options.email database = options.database web_env = options.web_env query_key = options.query_key start_pos = int(options.start_pos);", "nr_seq_query is a integer fasta_file = options.fasta_file info_file = options.info_file for key, value", "start a new download # entrez_download_fasta.py -m <EMAIL> -d nucleotide -e \"Cypripedioideae[Orgn] AND", "= results[\"WebEnv\"] query_key = results[\"QueryKey\"] # Write query specific info to info_file info_file_fd.write('Number", "from optparse import OptionParser from Bio import Entrez def main(): usage = \"usage:", "'@' :'__at__', } # Start a new download if(not options.continue_download): email = options.email", 
"to fetch info from (-d DATABASE)\" return - 7 if not options.web_env: print", "100 # Input strings generated by browser/galaxy needs to be replaced mapped_chars =", "% BATCH_SIZE) info_file_fd.close() # Now retrieve the FASTA sequences in batches of 5", "web_env.replace(value, key) query_key = query_key.replace(value, key) Entrez.email = email # Open info_file for", "\"NCID_1_38065753_192.168.127.12_9001_1300878409_78339627\" -q 1 -s 11 -n 38 -f out_continue.fasta -i info_continue.txt import sys", "key) entrez_query = entrez_query.replace(value, key) email = email.replace(value, key) Entrez.email = email #", "new download and (2) Continue previous download. Need to handle these separately. Sort", "= int(options.nr_seq_query); # should check if nr_seq_query is a integer fasta_file = options.fasta_file", "- 10 if not options.nr_seq_query: print \"Please specify the number of sequences in", "later. Not to crucial. else: print \"Continue a previous download...\" if not options.email:", "Entrez.email = email # Open info_file for writing info_file_fd = open(info_file, \"w\") info_file_fd.write('Email", "return - 11 if not options.fasta_file: print \"Please specify the FASTA output file", "= fetch_handle.read() except Exception, e: info_file_fd = open(info_file, \"a\") info_file_fd.write( \"Error raised when", "%i\\n' % (start+1, end)) info_file_fd.close() fasta_file_fd.close() else: # Continue a previous download email", "a integer fasta_file = options.fasta_file info_file = options.info_file for key, value in mapped_chars.items():", "address: %s\\n' % Entrez.email) info_file_fd.write('Database: %s\\n' % database) # Write query specific info", "%s\\n' % Entrez.email) info_file_fd.write('Database: %s\\n' % database) info_file_fd.write('Entrez query: %s\\n' % entrez_query) try:", "sequence %i to %i\" % (start+1, end) try: fetch_handle = Entrez.efetch(db=database, rettype=\"fasta\", retstart=start,", "Bio import Entrez def main(): usage = \"usage: %prog -m EMAIL -d 
DATABASE", "parser.add_option(\"-q\", \"--query_key\", dest=\"query_key\", help=\"Please provide the previous query_key.\") parser.add_option(\"-s\", \"--start_pos\", dest=\"start_pos\", help=\"Please provide", "handle = Entrez.esearch(db=database,term=entrez_query, usehistory='y') results = Entrez.read(handle) handle.close() except Exception, e: info_file_fd.write( \"Error", "email # Open info_file for writing info_file_fd = open(info_file, \"w\") info_file_fd.write('Email address: %s\\n'", "# Retrieve FASTA sequence from NCBI based on an Entrez query. # to", "print \"Please specify the download info file (-i INFO_FILE)\" return - 5 #", "key, value in mapped_chars.items(): database = database.replace(value, key) entrez_query = entrez_query.replace(value, key) email", "is a integer fasta_file = options.fasta_file info_file = options.info_file for key, value in", "be replaced mapped_chars = { '>' :'__gt__', '<' :'__lt__', '\\'' :'__sq__', '\"' :'__dq__',", "= open(info_file, \"w\") info_file_fd.write('Email address: %s\\n' % Entrez.email) info_file_fd.write('Database: %s\\n' % database) #", "the FASTA sequences in batches of 5 fasta_file_fd = open(fasta_file, \"w\") for start", "in original query (-n NR_SEQ_QUERY)\" return - 11 if not options.fasta_file: print \"Please", "EMAIL)\" return - 6 if not options.database: print \"Please specify the database to", "the download info file (-i INFO_FILE)\" return - 5 # Need to to", "info_file_fd.write('Number of sequences to be dowloaded: %d\\n' % (int(nr_seq_query) - int(start_pos) + 1))", "\"--info_file\", dest=\"info_file\", help=\"Information related to the download. 
Contains the web_env and query_key for", "int(start_pos) + 1)) info_file_fd.write('Download sequences from position: %d\\n' % start_pos) info_file_fd.write('web_env: %s\\n' %", "sequences from position: %d\\n' % start_pos) info_file_fd.write('web_env: %s\\n' % web_env) info_file_fd.write('query_key: %s\\n' %", "8 if not options.query_key: print \"Please specify the previous query_key (-q QUERY_KEY)\" return", "\"--email\", dest=\"email\", help=\"Email address. Need to provide this when doing an Entrez search", "matK[Gene]\" -f out.fasta -i download_info.txt # to continue a previous download # entrez_download_fasta.py", "for the search.\") (options, args) = parser.parse_args() if not options.continue_download: print \"Start a", "{ '>' :'__gt__', '<' :'__lt__', '\\'' :'__sq__', '\"' :'__dq__', '[' :'__ob__', ']' :'__cb__',", "= options.fasta_file info_file = options.info_file for key, value in mapped_chars.items(): database = database.replace(value,", "% (int(nr_seq_query) - int(start_pos) + 1)) info_file_fd.write('Download sequences from position: %d\\n' % start_pos)", "this when doing an Entrez search or fetch (-m EMAIL)\" return - 1", ":'__dq__', '[' :'__ob__', ']' :'__cb__', '{' :'__oc__', '}' :'__cc__', '@' :'__at__', } #", "query e.g. 
\\\"Bacteria\\\"[Organism] OR \\\"Archaea\\\"[Organism] OR prokaryotes[All Fields] not \\\"Escherichia coli\\\"[Organism]\") parser.add_option(\"-c\", \"--continue_download\",", "sequences in batches of %d\\n' % BATCH_SIZE) info_file_fd.close() # Now retrieve the FASTA", "= email.replace(value, key) Entrez.email = email # Open info_file for writing info_file_fd =", "sequence to start downloading from (-s START_POS)\" return - 10 if not options.nr_seq_query:", "query_key = results[\"QueryKey\"] # Write query specific info to info_file info_file_fd.write('Number of sequences", "<EMAIL> -d nucleotide -e \"Cypripedioideae[Orgn] AND matK[Gene]\" -f out.fasta -i download_info.txt # to", "search and fetch\") parser.add_option(\"-d\", \"--database\", dest=\"database\", help=\"Database e.g. nucleotide\") parser.add_option(\"-e\", \"--entrez_query\", dest=\"entrez_query\", help=\"Entrez", "-i info_continue.txt import sys import os import string import re from optparse import", "the arguments provided. Currently not working because we have 2 options # (1)", "retrieve the FASTA sequences in batches of 5 fasta_file_fd = open(fasta_file, \"w\") for", "help=\"FASTA output file\") parser.add_option(\"-i\", \"--info_file\", dest=\"info_file\", help=\"Information related to the download. 
Contains the", "DATABASE)\" return - 2 if not options.entrez_query: print \"Please specify an entrez query", "for start in range(start_pos - 1,nr_seq_query, BATCH_SIZE): end = min(nr_seq_query, start + BATCH_SIZE)", "print \"Please specify the download info file (-i INFO_FILE)\" return - 13 if", "mapped_chars.items(): database = database.replace(value, key) entrez_query = entrez_query.replace(value, key) email = email.replace(value, key)", "sequences to be dowloaded: %d\\n' % (int(nr_seq_query) - int(start_pos) + 1)) info_file_fd.write('Download sequences", "the sequence to start downloading from (-s START_POS)\" return - 10 if not", "FASTA sequences in batches of 5 fasta_file_fd = open(fasta_file, \"w\") for start in", "the length of the arguments provided. Currently not working because we have 2", "try: fetch_handle = Entrez.efetch(db=database, rettype=\"fasta\", retstart=start, retmax=BATCH_SIZE, webenv=web_env, query_key=query_key) data = fetch_handle.read() except", "start in range(0,nr_seq_query, BATCH_SIZE): end = min(nr_seq_query, start + BATCH_SIZE) print \"Dowloading sequence", "query_key.\") parser.add_option(\"-s\", \"--start_pos\", dest=\"start_pos\", help=\"Please provide position of the sequence to start downloading", "database to fetch info from (-d DATABASE)\" return - 7 if not options.web_env:", "\\\"Escherichia coli\\\"[Organism]\") parser.add_option(\"-c\", \"--continue_download\", action=\"store_true\", dest=\"continue_download\", default=False, help=\"If flag is specified program will", "% start_pos) info_file_fd.write('web_env: %s\\n' % web_env) info_file_fd.write('query_key: %s\\n' % query_key) info_file_fd.write('Downloading sequences in", "not \\\"Escherichia coli\\\"[Organism]\") parser.add_option(\"-c\", \"--continue_download\", action=\"store_true\", dest=\"continue_download\", default=False, help=\"If flag is specified program", "%s\\n' % database) # Write query specific info to info_file info_file_fd.write('Number of sequences", 
"raised! Exiting now!\") # do not exit anymore fetch_handle.close() fasta_file_fd.write(data) info_file_fd = open(info_file,", "sequence to start downloading from. E.g. the position where the previous download failed.\")", "entrez_query.replace(value, key) email = email.replace(value, key) Entrez.email = email # Open info_file for", "help=\"Email address. Need to provide this when doing an Entrez search and fetch\")", "Need to to some checking on the on the length of the arguments", "retmax=BATCH_SIZE, webenv=web_env, query_key=query_key) data = fetch_handle.read() except Exception, e: info_file_fd = open(info_file, \"a\")", "open(info_file, \"w\") info_file_fd.write('Email address: %s\\n' % Entrez.email) info_file_fd.write('Database: %s\\n' % database) info_file_fd.write('Entrez query:", "an Entrez.efind: %s\\n\" % str(e)) info_file_fd.write( \"Retrying...\") info_file_fd.close() # sys.exit( \"Error raised! Exiting", "OptionParser(usage=usage) parser.add_option(\"-m\", \"--email\", dest=\"email\", help=\"Email address. Need to provide this when doing an", "start downloading from. E.g. the position where the previous download failed.\") parser.add_option(\"-n\", \"--nr_seq_query\",", "options.continue_download: print \"Start a new download...\" if not options.email: print \"Please specify an", "end = min(nr_seq_query, start + BATCH_SIZE) print \"Dowloading sequence %i to %i\" %", "raised when trying do an Entrez.efind: %s\\n\" % str(e)) info_file_fd.close() sys.exit( \"Error raised!", "the database to fetch info from (-d DATABASE)\" return - 2 if not", "from search results web_env = results[\"WebEnv\"] query_key = results[\"QueryKey\"] # Write query specific", "download. 
Contains the web_env and query_key for the search.\") (options, args) = parser.parse_args()", "\"--query_key\", dest=\"query_key\", help=\"Please provide the previous query_key.\") parser.add_option(\"-s\", \"--start_pos\", dest=\"start_pos\", help=\"Please provide position", "specify the download info file (-i INFO_FILE)\" return - 13 if (len(args) >", "email = options.email database = options.database web_env = options.web_env query_key = options.query_key start_pos", "length of the arguments provided. Currently not working because we have 2 options", "%s\\n\" % str(e)) info_file_fd.close() sys.exit( \"Error raised! Exiting now!\") gi_list = results[\"IdList\"] nr_seq_query", "previous download...\" if not options.email: print \"Please specify an email address. Need to", "# Continue a previous download email = options.email database = options.database web_env =", "= Entrez.esearch(db=database,term=entrez_query, usehistory='y') results = Entrez.read(handle) handle.close() except Exception, e: info_file_fd.write( \"Error raised", "1)) info_file_fd.write('Download sequences from position: %d\\n' % start_pos) info_file_fd.write('web_env: %s\\n' % web_env) info_file_fd.write('query_key:", "trying do an Entrez.esearch: %s\\n\" % str(e)) info_file_fd.close() sys.exit( \"Error raised! Exiting now!\")", "help=\"Please provide position of the sequence to start downloading from. E.g. the position", "OptionParser from Bio import Entrez def main(): usage = \"usage: %prog -m EMAIL", "to provide this when doing an Entrez search and fetch\") parser.add_option(\"-d\", \"--database\", dest=\"database\",", "- 7 if not options.web_env: print \"Please specify the previous web_env (-w WEB_ENV)\"", "email address. Need to provide this when doing an Entrez search or fetch", "WEB_ENV -q QUERY_KEY -s START_POS -n NR_SEQ_QUERY -f FASTA_FILE -i INFO_FILE\" parser =", "Not to crucial. 
else: print \"Continue a previous download...\" if not options.email: print", "query (-e ENTREZ_QUERY)\" return - 3 if not options.fasta_file: print \"Please specify the", "Entrez.read(handle) handle.close() except Exception, e: info_file_fd.write( \"Error raised when trying do an Entrez.esearch:", "number of sequences found in the original query.\") parser.add_option(\"-f\", \"--fasta_file\", dest=\"fasta_file\", help=\"FASTA output", "the position where the previous download failed.\") parser.add_option(\"-n\", \"--nr_seq_query\", dest=\"nr_seq_query\", help=\"Please provide the", "an entrez query (-e ENTREZ_QUERY)\" return - 3 if not options.fasta_file: print \"Please", "info file (-i INFO_FILE)\" return - 13 if (len(args) > 0): print \"Too", "in mapped_chars.items(): database = database.replace(value, key) email = email.replace(value, key) web_env = web_env.replace(value,", "dowloaded: %d\\n' % (int(nr_seq_query) - int(start_pos) + 1)) info_file_fd.write('Download sequences from position: %d\\n'", "int(options.start_pos); # should check if start position is a integer nr_seq_query = int(options.nr_seq_query);", "webenv=web_env, query_key=query_key) data = fetch_handle.read() except Exception, e: info_file_fd = open(info_file, \"a\") info_file_fd.write(", "start_pos) info_file_fd.write('web_env: %s\\n' % web_env) info_file_fd.write('query_key: %s\\n' % query_key) info_file_fd.write('Downloading sequences in batches", "start position is a integer nr_seq_query = int(options.nr_seq_query); # should check if nr_seq_query", "= options.database web_env = options.web_env query_key = options.query_key start_pos = int(options.start_pos); # should", "retrieve the FASTA sequences in batches of BATCH_SIZE fasta_file_fd = open(fasta_file, \"w\") for", "print \"Please specify the database to fetch info from (-d DATABASE)\" return -", "previous download # entrez_download_fasta.py -m <EMAIL> -d nucleotide -c -w \"NCID_1_38065753_192.168.127.12_9001_1300878409_78339627\" -q 1", 
"related to the download. Contains the web_env and query_key for the search.\") (options,", "11 if not options.fasta_file: print \"Please specify the FASTA output file (-f FASTA_FILE)\"", "provide this when doing an Entrez search and fetch\") parser.add_option(\"-d\", \"--database\", dest=\"database\", help=\"Database", "dest=\"entrez_query\", help=\"Entrez query e.g. \\\"Bacteria\\\"[Organism] OR \\\"Archaea\\\"[Organism] OR prokaryotes[All Fields] not \\\"Escherichia coli\\\"[Organism]\")", "specify the database to fetch info from (-d DATABASE)\" return - 7 if", "9 if not options.start_pos: print \"Please specify the position of the sequence to", "integer fasta_file = options.fasta_file info_file = options.info_file for key, value in mapped_chars.items(): database", "default=False, help=\"If flag is specified program will continue a previous download. User need", "% Entrez.email) info_file_fd.write('Database: %s\\n' % database) info_file_fd.write('Entrez query: %s\\n' % entrez_query) try: handle", "nr_seq_query = int(options.nr_seq_query); # should check if nr_seq_query is a integer fasta_file =", "options.query_key: print \"Please specify the previous query_key (-q QUERY_KEY)\" return - 9 if", "anymore fetch_handle.close() fasta_file_fd.write(data) info_file_fd = open(info_file, \"a\") info_file_fd.write('Downloaded sequence %i to %i\\n' %", "original query.\") parser.add_option(\"-f\", \"--fasta_file\", dest=\"fasta_file\", help=\"FASTA output file\") parser.add_option(\"-i\", \"--info_file\", dest=\"info_file\", help=\"Information related", "options.database web_env = options.web_env query_key = options.query_key start_pos = int(options.start_pos); # should check", "% 0) info_file_fd.write('web_env: %s\\n' % web_env) info_file_fd.write('query_key: %s\\n'% query_key) info_file_fd.write('Downloading sequences in batches", "query. # to start a new download # entrez_download_fasta.py -m <EMAIL> -d nucleotide", "an Entrez query. 
# to start a new download # entrez_download_fasta.py -m <EMAIL>", "out.fasta -i download_info.txt # to continue a previous download # entrez_download_fasta.py -m <EMAIL>", "start_pos = int(options.start_pos); # should check if start position is a integer nr_seq_query", "raised when trying do an Entrez.esearch: %s\\n\" % str(e)) info_file_fd.close() sys.exit( \"Error raised!", "a previous download...\" if not options.email: print \"Please specify an email address. Need", "BATCH_SIZE) info_file_fd.close() # Now retrieve the FASTA sequences in batches of 5 fasta_file_fd", "sequences to be dowloaded: %d\\n' % nr_seq_query) info_file_fd.write('Download sequences from position: %d\\n' %", "downloading from (-s START_POS)\" return - 10 if not options.nr_seq_query: print \"Please specify", "= int(results[\"Count\"]) # assert count == len(gi_list) # do not do this test", "value in mapped_chars.items(): database = database.replace(value, key) email = email.replace(value, key) web_env =", "specific info to info_file info_file_fd.write('Number of sequences in original query: %d\\n' % nr_seq_query)", "specific info to info_file info_file_fd.write('Number of sequences in query: %d\\n' % nr_seq_query) info_file_fd.write('Number", "for key, value in mapped_chars.items(): database = database.replace(value, key) entrez_query = entrez_query.replace(value, key)", "(int(nr_seq_query) - int(start_pos) + 1)) info_file_fd.write('Download sequences from position: %d\\n' % start_pos) info_file_fd.write('web_env:", "in range(0,nr_seq_query, BATCH_SIZE): end = min(nr_seq_query, start + BATCH_SIZE) print \"Dowloading sequence %i", "info_file for writing info_file_fd = open(info_file, \"w\") info_file_fd.write('Email address: %s\\n' % Entrez.email) info_file_fd.write('Database:", "previous web_env.\") parser.add_option(\"-q\", \"--query_key\", dest=\"query_key\", help=\"Please provide the previous query_key.\") parser.add_option(\"-s\", \"--start_pos\", dest=\"start_pos\",", "min(nr_seq_query, 
start + BATCH_SIZE) print \"Dowloading sequence %i to %i\" % (start+1, end)", "parser.add_option(\"-f\", \"--fasta_file\", dest=\"fasta_file\", help=\"FASTA output file\") parser.add_option(\"-i\", \"--info_file\", dest=\"info_file\", help=\"Information related to the", "return - 8 if not options.query_key: print \"Please specify the previous query_key (-q", "Write query specific info to info_file info_file_fd.write('Number of sequences in query: %d\\n' %", "%d\\n' % BATCH_SIZE) info_file_fd.close() # Now retrieve the FASTA sequences in batches of", "key) email = email.replace(value, key) web_env = web_env.replace(value, key) query_key = query_key.replace(value, key)", "now!\") # do not exit anymore fetch_handle.close() fasta_file_fd.write(data) info_file_fd = open(info_file, \"a\") info_file_fd.write('Downloaded", "data = fetch_handle.read() except Exception, e: info_file_fd = open(info_file, \"a\") info_file_fd.write( \"Error raised", "fetch info from (-d DATABASE)\" return - 7 if not options.web_env: print \"Please", "'<' :'__lt__', '\\'' :'__sq__', '\"' :'__dq__', '[' :'__ob__', ']' :'__cb__', '{' :'__oc__', '}'", "Start new download and (2) Continue previous download. 
Need to handle these separately.", "print \"Please specify the number of sequences in original query (-n NR_SEQ_QUERY)\" return", "info_file_fd.write( \"Error raised when trying do an Entrez.esearch: %s\\n\" % str(e)) info_file_fd.close() sys.exit(", "5 # Need to to some checking on the on the length of", "info_file_fd = open(info_file, \"a\") info_file_fd.write('Downloaded sequence %i to %i\\n' % (start+1, end)) info_file_fd.close()", "info_file_fd.write('Downloaded sequence %i to %i\\n' % (start+1, end)) info_file_fd.close() fasta_file_fd.close() else: # Continue", "options.entrez_query: print \"Please specify an entrez query (-e ENTREZ_QUERY)\" return - 3 if", "the position of the sequence to start downloading from (-s START_POS)\" return -", "fasta_file_fd = open(fasta_file, \"w\") for start in range(0,nr_seq_query, BATCH_SIZE): end = min(nr_seq_query, start", "entrez_query) try: handle = Entrez.esearch(db=database,term=entrez_query, usehistory='y') results = Entrez.read(handle) handle.close() except Exception, e:", "download. Need to handle these separately. Sort it out later. Not to crucial.", "handle.close() except Exception, e: info_file_fd.write( \"Error raised when trying do an Entrez.esearch: %s\\n\"", "info_file_fd.write('Download sequences from position: %d\\n' % start_pos) info_file_fd.write('web_env: %s\\n' % web_env) info_file_fd.write('query_key: %s\\n'", "10 if not options.nr_seq_query: print \"Please specify the number of sequences in original", "be dowloaded: %d\\n' % (int(nr_seq_query) - int(start_pos) + 1)) info_file_fd.write('Download sequences from position:", "\"Please specify the database to fetch info from (-d DATABASE)\" return - 7", "sys.exit( \"Error raised! 
Exiting now!\") gi_list = results[\"IdList\"] nr_seq_query = int(results[\"Count\"]) # assert", "3 if not options.fasta_file: print \"Please specify the FASTA output file (-f FASTA_FILE)\"", "options.fasta_file: print \"Please specify the FASTA output file (-f FASTA_FILE)\" return - 12", "database to fetch info from (-d DATABASE)\" return - 2 if not options.entrez_query:", "os import string import re from optparse import OptionParser from Bio import Entrez", "% web_env) info_file_fd.write('query_key: %s\\n'% query_key) info_file_fd.write('Downloading sequences in batches of %d\\n' % BATCH_SIZE)", "- 6 if not options.database: print \"Please specify the database to fetch info", "query specific info to info_file info_file_fd.write('Number of sequences in query: %d\\n' % nr_seq_query)", "return - 3 if not options.fasta_file: print \"Please specify the FASTA output file", "\"Error raised! Exiting now!\") # do not exit anymore fetch_handle.close() fasta_file_fd.write(data) info_file_fd =", "import string import re from optparse import OptionParser from Bio import Entrez def", "\"Please specify the previous query_key (-q QUERY_KEY)\" return - 9 if not options.start_pos:", "(-m EMAIL)\" return - 1 if not options.database: print \"Please specify the database", "START_POS -n NR_SEQ_QUERY -f FASTA_FILE -i INFO_FILE\" parser = OptionParser(usage=usage) parser.add_option(\"-m\", \"--email\", dest=\"email\",", "the WEB_ENV, QUERY_KEY and SEQ_START\") parser.add_option(\"-w\", \"--web_env\", dest=\"web_env\", help=\"Please provide the previous web_env.\")", "end)) info_file_fd.close() fasta_file_fd.close() else: # Continue a previous download email = options.email database", "trying do an Entrez.efind: %s\\n\" % str(e)) info_file_fd.close() sys.exit( \"Error raised! 
Exiting now!\")", "info_file_fd.write('query_key: %s\\n'% query_key) info_file_fd.write('Downloading sequences in batches of %d\\n' % BATCH_SIZE) info_file_fd.close() #", "if (len(args) > 0): print \"Too many arguments\" return - 14 BATCH_SIZE =", "batches of 5 fasta_file_fd = open(fasta_file, \"w\") for start in range(0,nr_seq_query, BATCH_SIZE): end", "web_env) info_file_fd.write('query_key: %s\\n' % query_key) info_file_fd.write('Downloading sequences in batches of %d\\n' % BATCH_SIZE)", "when doing an Entrez search or fetch (-m EMAIL)\" return - 1 if", "an Entrez search and fetch\") parser.add_option(\"-d\", \"--database\", dest=\"database\", help=\"Database e.g. nucleotide\") parser.add_option(\"-e\", \"--entrez_query\",", "return - 2 if not options.entrez_query: print \"Please specify an entrez query (-e", "and fetch\") parser.add_option(\"-d\", \"--database\", dest=\"database\", help=\"Database e.g. nucleotide\") parser.add_option(\"-e\", \"--entrez_query\", dest=\"entrez_query\", help=\"Entrez query", "of the sequence to start downloading from (-s START_POS)\" return - 10 if", "== len(gi_list) # do not do this test gi_list it is always 20", "download_info.txt # to continue a previous download # entrez_download_fasta.py -m <EMAIL> -d nucleotide", "= Entrez.efetch(db=database, rettype=\"fasta\", retstart=start, retmax=BATCH_SIZE, webenv=web_env, query_key=query_key) data = fetch_handle.read() except Exception, e:", "\"Error raised! Exiting now!\") fetch_handle.close() fasta_file_fd.write(data) info_file_fd = open(info_file, \"a\") info_file_fd.write('Downloaded sequence %i", "do an Entrez.efind: %s\\n\" % str(e)) info_file_fd.close() sys.exit( \"Error raised! Exiting now!\") fetch_handle.close()", "is a integer nr_seq_query = int(options.nr_seq_query); # should check if nr_seq_query is a", "e.g. nucleotide\") parser.add_option(\"-e\", \"--entrez_query\", dest=\"entrez_query\", help=\"Entrez query e.g. 
\\\"Bacteria\\\"[Organism] OR \\\"Archaea\\\"[Organism] OR prokaryotes[All", "not exit anymore fetch_handle.close() fasta_file_fd.write(data) info_file_fd = open(info_file, \"a\") info_file_fd.write('Downloaded sequence %i to", "to be dowloaded: %d\\n' % (int(nr_seq_query) - int(start_pos) + 1)) info_file_fd.write('Download sequences from", "provide the previous web_env.\") parser.add_option(\"-q\", \"--query_key\", dest=\"query_key\", help=\"Please provide the previous query_key.\") parser.add_option(\"-s\",", "-c -w \"NCID_1_38065753_192.168.127.12_9001_1300878409_78339627\" -q 1 -s 11 -n 38 -f out_continue.fasta -i info_continue.txt", "<EMAIL> -d nucleotide -c -w \"NCID_1_38065753_192.168.127.12_9001_1300878409_78339627\" -q 1 -s 11 -n 38 -f", "the FASTA output file (-f FASTA_FILE)\" return - 12 if not options.info_file: print", "entrez_download_fasta.py -m <EMAIL> -d nucleotide -c -w \"NCID_1_38065753_192.168.127.12_9001_1300878409_78339627\" -q 1 -s 11 -n", "-q QUERY_KEY -s START_POS -n NR_SEQ_QUERY -f FASTA_FILE -i INFO_FILE\" parser = OptionParser(usage=usage)", "= \"usage: %prog -m EMAIL -d DATABASE -e ENTREZ_QUERY -c -w WEB_ENV -q", "specified program will continue a previous download. User need to provide the WEB_ENV,", "13 if (len(args) > 0): print \"Too many arguments\" return - 14 BATCH_SIZE", "mapped_chars = { '>' :'__gt__', '<' :'__lt__', '\\'' :'__sq__', '\"' :'__dq__', '[' :'__ob__',", "the on the length of the arguments provided. Currently not working because we", "str(e)) info_file_fd.close() sys.exit( \"Error raised! Exiting now!\") fetch_handle.close() fasta_file_fd.write(data) info_file_fd = open(info_file, \"a\")", "info_file_fd.write('Email address: %s\\n' % Entrez.email) info_file_fd.write('Database: %s\\n' % database) # Write query specific", "\"--entrez_query\", dest=\"entrez_query\", help=\"Entrez query e.g. 
\\\"Bacteria\\\"[Organism] OR \\\"Archaea\\\"[Organism] OR prokaryotes[All Fields] not \\\"Escherichia", "info_file_fd.close() info_file_fd = open(info_file, \"a\") # Now retrieve the FASTA sequences in batches", "download...\" if not options.email: print \"Please specify an email address. Need to provide", "found in the original query.\") parser.add_option(\"-f\", \"--fasta_file\", dest=\"fasta_file\", help=\"FASTA output file\") parser.add_option(\"-i\", \"--info_file\",", "fetch_handle.read() except Exception, e: info_file_fd = open(info_file, \"a\") info_file_fd.write( \"Error raised when trying", "= options.email database = options.database web_env = options.web_env query_key = options.query_key start_pos =", "# entrez_download_fasta.py -m <EMAIL> -d nucleotide -e \"Cypripedioideae[Orgn] AND matK[Gene]\" -f out.fasta -i", "sys import os import string import re from optparse import OptionParser from Bio", "dest=\"info_file\", help=\"Information related to the download. Contains the web_env and query_key for the", "always 20 # Get web_env and query_key from search results web_env = results[\"WebEnv\"]", "query_key.replace(value, key) Entrez.email = email # Open info_file for writing info_file_fd = open(info_file,", "info_file_fd.write('Number of sequences in original query: %d\\n' % nr_seq_query) info_file_fd.write('Number of sequences to", "when trying do an Entrez.esearch: %s\\n\" % str(e)) info_file_fd.close() sys.exit( \"Error raised! 
Exiting", "= database.replace(value, key) email = email.replace(value, key) web_env = web_env.replace(value, key) query_key =", "options.web_env query_key = options.query_key start_pos = int(options.start_pos); # should check if start position", "provide the previous query_key.\") parser.add_option(\"-s\", \"--start_pos\", dest=\"start_pos\", help=\"Please provide position of the sequence", "is always 20 # Get web_env and query_key from search results web_env =", "info_file_fd.write('Downloading sequences in batches of %d\\n' % BATCH_SIZE) info_file_fd.close() # Now retrieve the", "INFO_FILE)\" return - 5 # Need to to some checking on the on", "an Entrez search or fetch (-m EMAIL)\" return - 6 if not options.database:", "= results[\"QueryKey\"] # Write query specific info to info_file info_file_fd.write('Number of sequences in", "of %d\\n' % BATCH_SIZE) info_file_fd.close() info_file_fd = open(info_file, \"a\") # Now retrieve the", "= options.email database = options.database entrez_query = options.entrez_query fasta_file = options.fasta_file info_file =", "if not options.fasta_file: print \"Please specify the FASTA output file (-f FASTA_FILE)\" return", "of sequences in query: %d\\n' % nr_seq_query) info_file_fd.write('Number of sequences to be dowloaded:", "%s\\n' % query_key) info_file_fd.write('Downloading sequences in batches of %d\\n' % BATCH_SIZE) info_file_fd.close() info_file_fd", "working because we have 2 options # (1) Start new download and (2)", "return - 13 if (len(args) > 0): print \"Too many arguments\" return -", "%s\\n'% query_key) info_file_fd.write('Downloading sequences in batches of %d\\n' % BATCH_SIZE) info_file_fd.close() # Now", "info_file_fd.write('Number of sequences to be dowloaded: %d\\n' % nr_seq_query) info_file_fd.write('Download sequences from position:", "Entrez def main(): usage = \"usage: %prog -m EMAIL -d DATABASE -e ENTREZ_QUERY", "options.web_env: print \"Please specify the previous web_env (-w WEB_ENV)\" return - 8 if", "many 
arguments\" return - 14 BATCH_SIZE = 100 # Input strings generated by", "- 14 BATCH_SIZE = 100 # Input strings generated by browser/galaxy needs to", "nucleotide -c -w \"NCID_1_38065753_192.168.127.12_9001_1300878409_78339627\" -q 1 -s 11 -n 38 -f out_continue.fasta -i", "integer nr_seq_query = int(options.nr_seq_query); # should check if nr_seq_query is a integer fasta_file", "Entrez.email) info_file_fd.write('Database: %s\\n' % database) info_file_fd.write('Entrez query: %s\\n' % entrez_query) try: handle =", "OR \\\"Archaea\\\"[Organism] OR prokaryotes[All Fields] not \\\"Escherichia coli\\\"[Organism]\") parser.add_option(\"-c\", \"--continue_download\", action=\"store_true\", dest=\"continue_download\", default=False,", "BATCH_SIZE fasta_file_fd = open(fasta_file, \"w\") for start in range(start_pos - 1,nr_seq_query, BATCH_SIZE): end", "if not options.entrez_query: print \"Please specify an entrez query (-e ENTREZ_QUERY)\" return -", ":'__lt__', '\\'' :'__sq__', '\"' :'__dq__', '[' :'__ob__', ']' :'__cb__', '{' :'__oc__', '}' :'__cc__',", "print \"Please specify an entrez query (-e ENTREZ_QUERY)\" return - 3 if not", "a new download # entrez_download_fasta.py -m <EMAIL> -d nucleotide -e \"Cypripedioideae[Orgn] AND matK[Gene]\"", "parser.parse_args() if not options.continue_download: print \"Start a new download...\" if not options.email: print", "print \"Please specify an email address. 
Need to provide this when doing an", "not options.info_file: print \"Please specify the download info file (-i INFO_FILE)\" return -", "return - 9 if not options.start_pos: print \"Please specify the position of the", "print \"Continue a previous download...\" if not options.email: print \"Please specify an email", "if(not options.continue_download): email = options.email database = options.database entrez_query = options.entrez_query fasta_file =", "\"Error raised when trying do an Entrez.efind: %s\\n\" % str(e)) info_file_fd.write( \"Retrying...\") info_file_fd.close()", "generated by browser/galaxy needs to be replaced mapped_chars = { '>' :'__gt__', '<'", "this when doing an Entrez search and fetch\") parser.add_option(\"-d\", \"--database\", dest=\"database\", help=\"Database e.g.", "web_env = results[\"WebEnv\"] query_key = results[\"QueryKey\"] # Write query specific info to info_file", "= open(fasta_file, \"w\") for start in range(0,nr_seq_query, BATCH_SIZE): end = min(nr_seq_query, start +", "info_file_fd.close() # sys.exit( \"Error raised! Exiting now!\") # do not exit anymore fetch_handle.close()", "now!\") fetch_handle.close() fasta_file_fd.write(data) info_file_fd = open(info_file, \"a\") info_file_fd.write('Downloaded sequence %i to %i\\n' %", "parser.add_option(\"-d\", \"--database\", dest=\"database\", help=\"Database e.g. nucleotide\") parser.add_option(\"-e\", \"--entrez_query\", dest=\"entrez_query\", help=\"Entrez query e.g. \\\"Bacteria\\\"[Organism]", "return - 14 BATCH_SIZE = 100 # Input strings generated by browser/galaxy needs", "the number of sequences in original query (-n NR_SEQ_QUERY)\" return - 11 if", "download if(not options.continue_download): email = options.email database = options.database entrez_query = options.entrez_query fasta_file", "to the download. 
Contains the web_env and query_key for the search.\") (options, args)", "return - 6 if not options.database: print \"Please specify the database to fetch", "(-w WEB_ENV)\" return - 8 if not options.query_key: print \"Please specify the previous", "dest=\"start_pos\", help=\"Please provide position of the sequence to start downloading from. E.g. the", "the previous query_key (-q QUERY_KEY)\" return - 9 if not options.start_pos: print \"Please", "0) info_file_fd.write('web_env: %s\\n' % web_env) info_file_fd.write('query_key: %s\\n'% query_key) info_file_fd.write('Downloading sequences in batches of", "-s START_POS -n NR_SEQ_QUERY -f FASTA_FILE -i INFO_FILE\" parser = OptionParser(usage=usage) parser.add_option(\"-m\", \"--email\",", "= open(info_file, \"w\") info_file_fd.write('Email address: %s\\n' % Entrez.email) info_file_fd.write('Database: %s\\n' % database) info_file_fd.write('Entrez", "= open(info_file, \"a\") info_file_fd.write('Downloaded sequence %i to %i\\n' % (start+1, end)) info_file_fd.close() fasta_file_fd.close()", "OR prokaryotes[All Fields] not \\\"Escherichia coli\\\"[Organism]\") parser.add_option(\"-c\", \"--continue_download\", action=\"store_true\", dest=\"continue_download\", default=False, help=\"If flag", "the sequence to start downloading from. E.g. 
the position where the previous download", "options.database: print \"Please specify the database to fetch info from (-d DATABASE)\" return", "FASTA output file (-f FASTA_FILE)\" return - 12 if not options.info_file: print \"Please", "replaced mapped_chars = { '>' :'__gt__', '<' :'__lt__', '\\'' :'__sq__', '\"' :'__dq__', '['", "query.\") parser.add_option(\"-f\", \"--fasta_file\", dest=\"fasta_file\", help=\"FASTA output file\") parser.add_option(\"-i\", \"--info_file\", dest=\"info_file\", help=\"Information related to", "(-i INFO_FILE)\" return - 5 # Need to to some checking on the", "int(results[\"Count\"]) # assert count == len(gi_list) # do not do this test gi_list", "Continue a previous download email = options.email database = options.database web_env = options.web_env", "%s\\n' % web_env) info_file_fd.write('query_key: %s\\n' % query_key) info_file_fd.write('Downloading sequences in batches of %d\\n'", "sequences in batches of %d\\n' % BATCH_SIZE) info_file_fd.close() info_file_fd = open(info_file, \"a\") #", "it is always 20 # Get web_env and query_key from search results web_env", "- 1,nr_seq_query, BATCH_SIZE): end = min(nr_seq_query, start + BATCH_SIZE) print \"Dowloading sequence %i", "sys.exit( \"Error raised! Exiting now!\") fetch_handle.close() fasta_file_fd.write(data) info_file_fd = open(info_file, \"a\") info_file_fd.write('Downloaded sequence", "import os import string import re from optparse import OptionParser from Bio import", "%d\\n' % BATCH_SIZE) info_file_fd.close() info_file_fd = open(info_file, \"a\") # Now retrieve the FASTA", "# assert count == len(gi_list) # do not do this test gi_list it", "= int(options.start_pos); # should check if start position is a integer nr_seq_query =", "# sys.exit( \"Error raised! 
Exiting now!\") # do not exit anymore fetch_handle.close() fasta_file_fd.write(data)", "# Input strings generated by browser/galaxy needs to be replaced mapped_chars = {", ":'__cc__', '@' :'__at__', } # Start a new download if(not options.continue_download): email =", "entrez_query = options.entrez_query fasta_file = options.fasta_file info_file = options.info_file for key, value in", "help=\"Please provide the previous web_env.\") parser.add_option(\"-q\", \"--query_key\", dest=\"query_key\", help=\"Please provide the previous query_key.\")", "info_file_fd.write('Downloading sequences in batches of %d\\n' % BATCH_SIZE) info_file_fd.close() info_file_fd = open(info_file, \"a\")", "- 8 if not options.query_key: print \"Please specify the previous query_key (-q QUERY_KEY)\"", "%d\\n' % nr_seq_query) info_file_fd.write('Download sequences from position: %d\\n' % 0) info_file_fd.write('web_env: %s\\n' %", "or fetch (-m EMAIL)\" return - 6 if not options.database: print \"Please specify", "not options.nr_seq_query: print \"Please specify the number of sequences in original query (-n", "% str(e)) info_file_fd.write( \"Retrying...\") info_file_fd.close() # sys.exit( \"Error raised! Exiting now!\") # do", "start in range(start_pos - 1,nr_seq_query, BATCH_SIZE): end = min(nr_seq_query, start + BATCH_SIZE) print", "results[\"WebEnv\"] query_key = results[\"QueryKey\"] # Write query specific info to info_file info_file_fd.write('Number of", "position: %d\\n' % start_pos) info_file_fd.write('web_env: %s\\n' % web_env) info_file_fd.write('query_key: %s\\n' % query_key) info_file_fd.write('Downloading", "\"w\") for start in range(start_pos - 1,nr_seq_query, BATCH_SIZE): end = min(nr_seq_query, start +", "%s\\n\" % str(e)) info_file_fd.close() sys.exit( \"Error raised! 
Exiting now!\") fetch_handle.close() fasta_file_fd.write(data) info_file_fd =", "parser.add_option(\"-c\", \"--continue_download\", action=\"store_true\", dest=\"continue_download\", default=False, help=\"If flag is specified program will continue a", "download. User need to provide the WEB_ENV, QUERY_KEY and SEQ_START\") parser.add_option(\"-w\", \"--web_env\", dest=\"web_env\",", "should check if start position is a integer nr_seq_query = int(options.nr_seq_query); # should", "(-d DATABASE)\" return - 7 if not options.web_env: print \"Please specify the previous", "a integer nr_seq_query = int(options.nr_seq_query); # should check if nr_seq_query is a integer", "\"--fasta_file\", dest=\"fasta_file\", help=\"FASTA output file\") parser.add_option(\"-i\", \"--info_file\", dest=\"info_file\", help=\"Information related to the download.", "= database.replace(value, key) entrez_query = entrez_query.replace(value, key) email = email.replace(value, key) Entrez.email =", "for writing info_file_fd = open(info_file, \"w\") info_file_fd.write('Email address: %s\\n' % Entrez.email) info_file_fd.write('Database: %s\\n'", "\"Start a new download...\" if not options.email: print \"Please specify an email address.", "a new download if(not options.continue_download): email = options.email database = options.database entrez_query =", "QUERY_KEY and SEQ_START\") parser.add_option(\"-w\", \"--web_env\", dest=\"web_env\", help=\"Please provide the previous web_env.\") parser.add_option(\"-q\", \"--query_key\",", "results web_env = results[\"WebEnv\"] query_key = results[\"QueryKey\"] # Write query specific info to", "dest=\"database\", help=\"Database e.g. nucleotide\") parser.add_option(\"-e\", \"--entrez_query\", dest=\"entrez_query\", help=\"Entrez query e.g. \\\"Bacteria\\\"[Organism] OR \\\"Archaea\\\"[Organism]", "an Entrez search or fetch (-m EMAIL)\" return - 1 if not options.database:", "crucial. 
else: print \"Continue a previous download...\" if not options.email: print \"Please specify", "count == len(gi_list) # do not do this test gi_list it is always", "open(fasta_file, \"w\") for start in range(0,nr_seq_query, BATCH_SIZE): end = min(nr_seq_query, start + BATCH_SIZE)", "str(e)) info_file_fd.close() sys.exit( \"Error raised! Exiting now!\") gi_list = results[\"IdList\"] nr_seq_query = int(results[\"Count\"])", "info_file_fd.write('Downloaded sequence %i to %i\\n' % (start+1, end)) info_file_fd.close() fasta_file_fd.close() if __name__ ==", "to info_file info_file_fd.write('Number of sequences in query: %d\\n' % nr_seq_query) info_file_fd.write('Number of sequences", "range(0,nr_seq_query, BATCH_SIZE): end = min(nr_seq_query, start + BATCH_SIZE) print \"Dowloading sequence %i to", "download email = options.email database = options.database web_env = options.web_env query_key = options.query_key", "of 5 fasta_file_fd = open(fasta_file, \"w\") for start in range(0,nr_seq_query, BATCH_SIZE): end =", "position: %d\\n' % 0) info_file_fd.write('web_env: %s\\n' % web_env) info_file_fd.write('query_key: %s\\n'% query_key) info_file_fd.write('Downloading sequences", "need to provide the WEB_ENV, QUERY_KEY and SEQ_START\") parser.add_option(\"-w\", \"--web_env\", dest=\"web_env\", help=\"Please provide", "test gi_list it is always 20 # Get web_env and query_key from search", "- int(start_pos) + 1)) info_file_fd.write('Download sequences from position: %d\\n' % start_pos) info_file_fd.write('web_env: %s\\n'", "-m EMAIL -d DATABASE -e ENTREZ_QUERY -c -w WEB_ENV -q QUERY_KEY -s START_POS", "query_key) info_file_fd.write('Downloading sequences in batches of %d\\n' % BATCH_SIZE) info_file_fd.close() # Now retrieve", ":'__at__', } # Start a new download if(not options.continue_download): email = options.email database", "= options.entrez_query fasta_file = options.fasta_file info_file = options.info_file for key, value in mapped_chars.items():", "5 fasta_file_fd = 
open(fasta_file, \"w\") for start in range(0,nr_seq_query, BATCH_SIZE): end = min(nr_seq_query,", "database = options.database web_env = options.web_env query_key = options.query_key start_pos = int(options.start_pos); #", "sequences in original query (-n NR_SEQ_QUERY)\" return - 11 if not options.fasta_file: print", "% query_key) info_file_fd.write('Downloading sequences in batches of %d\\n' % BATCH_SIZE) info_file_fd.close() info_file_fd =", "gi_list it is always 20 # Get web_env and query_key from search results", "INFO_FILE)\" return - 13 if (len(args) > 0): print \"Too many arguments\" return", "original query: %d\\n' % nr_seq_query) info_file_fd.write('Number of sequences to be dowloaded: %d\\n' %", "a previous download. User need to provide the WEB_ENV, QUERY_KEY and SEQ_START\") parser.add_option(\"-w\",", "database) # Write query specific info to info_file info_file_fd.write('Number of sequences in original", "EMAIL)\" return - 1 if not options.database: print \"Please specify the database to", "trying do an Entrez.efind: %s\\n\" % str(e)) info_file_fd.write( \"Retrying...\") info_file_fd.close() # sys.exit( \"Error", "- 11 if not options.fasta_file: print \"Please specify the FASTA output file (-f", ":'__cb__', '{' :'__oc__', '}' :'__cc__', '@' :'__at__', } # Start a new download", "%d\\n' % (int(nr_seq_query) - int(start_pos) + 1)) info_file_fd.write('Download sequences from position: %d\\n' %", "specify the previous query_key (-q QUERY_KEY)\" return - 9 if not options.start_pos: print", "\"a\") info_file_fd.write('Downloaded sequence %i to %i\\n' % (start+1, end)) info_file_fd.close() fasta_file_fd.close() if __name__", "download # entrez_download_fasta.py -m <EMAIL> -d nucleotide -e \"Cypripedioideae[Orgn] AND matK[Gene]\" -f out.fasta", "start downloading from (-s START_POS)\" return - 10 if not options.nr_seq_query: print \"Please", "of sequences in original query: %d\\n' % nr_seq_query) info_file_fd.write('Number of sequences to be", "out later. 
Not to crucial. else: print \"Continue a previous download...\" if not", "to to some checking on the on the length of the arguments provided.", "when doing an Entrez search or fetch (-m EMAIL)\" return - 6 if", "a new download...\" if not options.email: print \"Please specify an email address. Need", "FASTA_FILE)\" return - 4 if not options.info_file: print \"Please specify the download info", "provided. Currently not working because we have 2 options # (1) Start new", "4 if not options.info_file: print \"Please specify the download info file (-i INFO_FILE)\"", "start + BATCH_SIZE) print \"Dowloading sequence %i to %i\" % (start+1, end) try:", "fetch_handle = Entrez.efetch(db=database, rettype=\"fasta\", retstart=start, retmax=BATCH_SIZE, webenv=web_env, query_key=query_key) data = fetch_handle.read() except Exception,", "Start a new download if(not options.continue_download): email = options.email database = options.database entrez_query", "this when doing an Entrez search or fetch (-m EMAIL)\" return - 6", "e: info_file_fd.write( \"Error raised when trying do an Entrez.esearch: %s\\n\" % str(e)) info_file_fd.close()", "provide the WEB_ENV, QUERY_KEY and SEQ_START\") parser.add_option(\"-w\", \"--web_env\", dest=\"web_env\", help=\"Please provide the previous", "options.fasta_file: print \"Please specify the FASTA output file (-f FASTA_FILE)\" return - 4", "- 4 if not options.info_file: print \"Please specify the download info file (-i", "Open info_file for writing info_file_fd = open(info_file, \"w\") info_file_fd.write('Email address: %s\\n' % Entrez.email)", "in batches of %d\\n' % BATCH_SIZE) info_file_fd.close() info_file_fd = open(info_file, \"a\") # Now", "info_continue.txt import sys import os import string import re from optparse import OptionParser", "when trying do an Entrez.efind: %s\\n\" % str(e)) info_file_fd.write( \"Retrying...\") info_file_fd.close() # sys.exit(", "\"--nr_seq_query\", dest=\"nr_seq_query\", help=\"Please provide the number of 
sequences found in the original query.\")", "(1) Start new download and (2) Continue previous download. Need to handle these", "continue a previous download. User need to provide the WEB_ENV, QUERY_KEY and SEQ_START\")", "info_file_fd.write('Number of sequences in query: %d\\n' % nr_seq_query) info_file_fd.write('Number of sequences to be", "or fetch (-m EMAIL)\" return - 1 if not options.database: print \"Please specify", "12 if not options.info_file: print \"Please specify the download info file (-i INFO_FILE)\"", "these separately. Sort it out later. Not to crucial. else: print \"Continue a", "+ BATCH_SIZE) print \"Dowloading sequence %i to %i\" % (start+1, end) try: fetch_handle", "of sequences found in the original query.\") parser.add_option(\"-f\", \"--fasta_file\", dest=\"fasta_file\", help=\"FASTA output file\")", "if not options.web_env: print \"Please specify the previous web_env (-w WEB_ENV)\" return -", "= Entrez.read(handle) handle.close() except Exception, e: info_file_fd.write( \"Error raised when trying do an", "sequences in original query: %d\\n' % nr_seq_query) info_file_fd.write('Number of sequences to be dowloaded:", "2 if not options.entrez_query: print \"Please specify an entrez query (-e ENTREZ_QUERY)\" return", "query_key) info_file_fd.write('Downloading sequences in batches of %d\\n' % BATCH_SIZE) info_file_fd.close() info_file_fd = open(info_file,", "\"w\") for start in range(0,nr_seq_query, BATCH_SIZE): end = min(nr_seq_query, start + BATCH_SIZE) print", "if not options.nr_seq_query: print \"Please specify the number of sequences in original query", "\"--database\", dest=\"database\", help=\"Database e.g. nucleotide\") parser.add_option(\"-e\", \"--entrez_query\", dest=\"entrez_query\", help=\"Entrez query e.g. 
\\\"Bacteria\\\"[Organism] OR", "the download info file (-i INFO_FILE)\" return - 13 if (len(args) > 0):", "not options.web_env: print \"Please specify the previous web_env (-w WEB_ENV)\" return - 8", "BATCH_SIZE = 100 # Input strings generated by browser/galaxy needs to be replaced", "check if nr_seq_query is a integer fasta_file = options.fasta_file info_file = options.info_file for", "AND matK[Gene]\" -f out.fasta -i download_info.txt # to continue a previous download #", "%s\\n' % Entrez.email) info_file_fd.write('Database: %s\\n' % database) # Write query specific info to", "continue a previous download # entrez_download_fasta.py -m <EMAIL> -d nucleotide -c -w \"NCID_1_38065753_192.168.127.12_9001_1300878409_78339627\"", "previous download failed.\") parser.add_option(\"-n\", \"--nr_seq_query\", dest=\"nr_seq_query\", help=\"Please provide the number of sequences found", "Currently not working because we have 2 options # (1) Start new download", "writing info_file_fd = open(info_file, \"w\") info_file_fd.write('Email address: %s\\n' % Entrez.email) info_file_fd.write('Database: %s\\n' %", "open(info_file, \"a\") # Now retrieve the FASTA sequences in batches of BATCH_SIZE fasta_file_fd", "e.g. \\\"Bacteria\\\"[Organism] OR \\\"Archaea\\\"[Organism] OR prokaryotes[All Fields] not \\\"Escherichia coli\\\"[Organism]\") parser.add_option(\"-c\", \"--continue_download\", action=\"store_true\",", "not options.email: print \"Please specify an email address. Need to provide this when", "provide position of the sequence to start downloading from. E.g. 
the position where", "% web_env) info_file_fd.write('query_key: %s\\n' % query_key) info_file_fd.write('Downloading sequences in batches of %d\\n' %", "info_file info_file_fd.write('Number of sequences in original query: %d\\n' % nr_seq_query) info_file_fd.write('Number of sequences", "info_file = options.info_file for key, value in mapped_chars.items(): database = database.replace(value, key) entrez_query", "batches of %d\\n' % BATCH_SIZE) info_file_fd.close() info_file_fd = open(info_file, \"a\") # Now retrieve", "value in mapped_chars.items(): database = database.replace(value, key) entrez_query = entrez_query.replace(value, key) email =", "to provide the WEB_ENV, QUERY_KEY and SEQ_START\") parser.add_option(\"-w\", \"--web_env\", dest=\"web_env\", help=\"Please provide the", "address. Need to provide this when doing an Entrez search or fetch (-m", "BATCH_SIZE): end = min(nr_seq_query, start + BATCH_SIZE) print \"Dowloading sequence %i to %i\"", "return - 7 if not options.web_env: print \"Please specify the previous web_env (-w", "search or fetch (-m EMAIL)\" return - 1 if not options.database: print \"Please", "len(gi_list) # do not do this test gi_list it is always 20 #", "if start position is a integer nr_seq_query = int(options.nr_seq_query); # should check if", "from position: %d\\n' % start_pos) info_file_fd.write('web_env: %s\\n' % web_env) info_file_fd.write('query_key: %s\\n' % query_key)", "doing an Entrez search or fetch (-m EMAIL)\" return - 1 if not", "open(info_file, \"a\") info_file_fd.write('Downloaded sequence %i to %i\\n' % (start+1, end)) info_file_fd.close() fasta_file_fd.close() else:", "FASTA sequences in batches of BATCH_SIZE fasta_file_fd = open(fasta_file, \"w\") for start in", "Entrez.efetch(db=database, rettype=\"fasta\", retstart=start, retmax=BATCH_SIZE, webenv=web_env, query_key=query_key) data = fetch_handle.read() except Exception, e: info_file_fd", "info_file_fd.write('web_env: %s\\n' % web_env) info_file_fd.write('query_key: 
%s\\n' % query_key) info_file_fd.write('Downloading sequences in batches of", "= entrez_query.replace(value, key) email = email.replace(value, key) Entrez.email = email # Open info_file", "Entrez.esearch(db=database,term=entrez_query, usehistory='y') results = Entrez.read(handle) handle.close() except Exception, e: info_file_fd.write( \"Error raised when", "download info file (-i INFO_FILE)\" return - 5 # Need to to some", "# Need to to some checking on the on the length of the", "raised when trying do an Entrez.efind: %s\\n\" % str(e)) info_file_fd.write( \"Retrying...\") info_file_fd.close() #", "dest=\"web_env\", help=\"Please provide the previous web_env.\") parser.add_option(\"-q\", \"--query_key\", dest=\"query_key\", help=\"Please provide the previous", "because we have 2 options # (1) Start new download and (2) Continue", "\"a\") info_file_fd.write('Downloaded sequence %i to %i\\n' % (start+1, end)) info_file_fd.close() fasta_file_fd.close() else: #", "dest=\"fasta_file\", help=\"FASTA output file\") parser.add_option(\"-i\", \"--info_file\", dest=\"info_file\", help=\"Information related to the download. Contains", ":'__gt__', '<' :'__lt__', '\\'' :'__sq__', '\"' :'__dq__', '[' :'__ob__', ']' :'__cb__', '{' :'__oc__',", "print \"Please specify the FASTA output file (-f FASTA_FILE)\" return - 4 if", "parser.add_option(\"-m\", \"--email\", dest=\"email\", help=\"Email address. 
Need to provide this when doing an Entrez", "\"Error raised when trying do an Entrez.esearch: %s\\n\" % str(e)) info_file_fd.close() sys.exit( \"Error", "nr_seq_query) info_file_fd.write('Download sequences from position: %d\\n' % 0) info_file_fd.write('web_env: %s\\n' % web_env) info_file_fd.write('query_key:", "try: handle = Entrez.esearch(db=database,term=entrez_query, usehistory='y') results = Entrez.read(handle) handle.close() except Exception, e: info_file_fd.write(", "not options.query_key: print \"Please specify the previous query_key (-q QUERY_KEY)\" return - 9", "file (-f FASTA_FILE)\" return - 4 if not options.info_file: print \"Please specify the", "Exiting now!\") fetch_handle.close() fasta_file_fd.write(data) info_file_fd = open(info_file, \"a\") info_file_fd.write('Downloaded sequence %i to %i\\n'", "- 3 if not options.fasta_file: print \"Please specify the FASTA output file (-f", "to continue a previous download # entrez_download_fasta.py -m <EMAIL> -d nucleotide -c -w", "provide this when doing an Entrez search or fetch (-m EMAIL)\" return -", "if not options.info_file: print \"Please specify the download info file (-i INFO_FILE)\" return", "BATCH_SIZE) print \"Dowloading sequence %i to %i\" % (start+1, end) try: fetch_handle =", "\"Please specify an entrez query (-e ENTREZ_QUERY)\" return - 3 if not options.fasta_file:", "to start downloading from (-s START_POS)\" return - 10 if not options.nr_seq_query: print", "we have 2 options # (1) Start new download and (2) Continue previous", "User need to provide the WEB_ENV, QUERY_KEY and SEQ_START\") parser.add_option(\"-w\", \"--web_env\", dest=\"web_env\", help=\"Please", "in query: %d\\n' % nr_seq_query) info_file_fd.write('Number of sequences to be dowloaded: %d\\n' %", "info to info_file info_file_fd.write('Number of sequences in original query: %d\\n' % nr_seq_query) info_file_fd.write('Number", "Entrez.efind: %s\\n\" % str(e)) info_file_fd.write( \"Retrying...\") info_file_fd.close() # sys.exit( 
\"Error raised! Exiting now!\")", "20 # Get web_env and query_key from search results web_env = results[\"WebEnv\"] query_key", "# Open info_file for writing info_file_fd = open(info_file, \"w\") info_file_fd.write('Email address: %s\\n' %", "help=\"Please provide the previous query_key.\") parser.add_option(\"-s\", \"--start_pos\", dest=\"start_pos\", help=\"Please provide position of the", "raised! Exiting now!\") gi_list = results[\"IdList\"] nr_seq_query = int(results[\"Count\"]) # assert count ==", "to start a new download # entrez_download_fasta.py -m <EMAIL> -d nucleotide -e \"Cypripedioideae[Orgn]", "and (2) Continue previous download. Need to handle these separately. Sort it out", "end) try: fetch_handle = Entrez.efetch(db=database, rettype=\"fasta\", retstart=start, retmax=BATCH_SIZE, webenv=web_env, query_key=query_key) data = fetch_handle.read()", "in original query: %d\\n' % nr_seq_query) info_file_fd.write('Number of sequences to be dowloaded: %d\\n'", "START_POS)\" return - 10 if not options.nr_seq_query: print \"Please specify the number of", "1 if not options.database: print \"Please specify the database to fetch info from", "search.\") (options, args) = parser.parse_args() if not options.continue_download: print \"Start a new download...\"", "# Get web_env and query_key from search results web_env = results[\"WebEnv\"] query_key =", "\"Retrying...\") info_file_fd.close() # sys.exit( \"Error raised! Exiting now!\") # do not exit anymore", "'}' :'__cc__', '@' :'__at__', } # Start a new download if(not options.continue_download): email", "on the on the length of the arguments provided. 
Currently not working because", "Entrez.email) info_file_fd.write('Database: %s\\n' % database) # Write query specific info to info_file info_file_fd.write('Number", "fetch info from (-d DATABASE)\" return - 2 if not options.entrez_query: print \"Please", "print \"Too many arguments\" return - 14 BATCH_SIZE = 100 # Input strings", "0): print \"Too many arguments\" return - 14 BATCH_SIZE = 100 # Input", "when trying do an Entrez.efind: %s\\n\" % str(e)) info_file_fd.close() sys.exit( \"Error raised! Exiting", "NCBI based on an Entrez query. # to start a new download #", "= options.info_file for key, value in mapped_chars.items(): database = database.replace(value, key) entrez_query =", "options.query_key start_pos = int(options.start_pos); # should check if start position is a integer", "from position: %d\\n' % 0) info_file_fd.write('web_env: %s\\n' % web_env) info_file_fd.write('query_key: %s\\n'% query_key) info_file_fd.write('Downloading", "from Bio import Entrez def main(): usage = \"usage: %prog -m EMAIL -d", "where the previous download failed.\") parser.add_option(\"-n\", \"--nr_seq_query\", dest=\"nr_seq_query\", help=\"Please provide the number of", "\"Please specify the download info file (-i INFO_FILE)\" return - 13 if (len(args)", "database.replace(value, key) entrez_query = entrez_query.replace(value, key) email = email.replace(value, key) Entrez.email = email", "database.replace(value, key) email = email.replace(value, key) web_env = web_env.replace(value, key) query_key = query_key.replace(value,", "args) = parser.parse_args() if not options.continue_download: print \"Start a new download...\" if not", "14 BATCH_SIZE = 100 # Input strings generated by browser/galaxy needs to be", "of the sequence to start downloading from. E.g. the position where the previous", "info_file_fd.close() fasta_file_fd.close() else: # Continue a previous download email = options.email database =", "options # (1) Start new download and (2) Continue previous download. 
Need to", "']' :'__cb__', '{' :'__oc__', '}' :'__cc__', '@' :'__at__', } # Start a new", "\"Error raised! Exiting now!\") gi_list = results[\"IdList\"] nr_seq_query = int(results[\"Count\"]) # assert count", "(-i INFO_FILE)\" return - 13 if (len(args) > 0): print \"Too many arguments\"", "\"Please specify the position of the sequence to start downloading from (-s START_POS)\"", "(-f FASTA_FILE)\" return - 12 if not options.info_file: print \"Please specify the download", "on an Entrez query. # to start a new download # entrez_download_fasta.py -m", "not working because we have 2 options # (1) Start new download and", "return - 10 if not options.nr_seq_query: print \"Please specify the number of sequences", "sequences found in the original query.\") parser.add_option(\"-f\", \"--fasta_file\", dest=\"fasta_file\", help=\"FASTA output file\") parser.add_option(\"-i\",", "NR_SEQ_QUERY -f FASTA_FILE -i INFO_FILE\" parser = OptionParser(usage=usage) parser.add_option(\"-m\", \"--email\", dest=\"email\", help=\"Email address.", "Need to provide this when doing an Entrez search and fetch\") parser.add_option(\"-d\", \"--database\",", "FASTA_FILE)\" return - 12 if not options.info_file: print \"Please specify the download info", "key, value in mapped_chars.items(): database = database.replace(value, key) email = email.replace(value, key) web_env", "parser.add_option(\"-n\", \"--nr_seq_query\", dest=\"nr_seq_query\", help=\"Please provide the number of sequences found in the original", "the previous download failed.\") parser.add_option(\"-n\", \"--nr_seq_query\", dest=\"nr_seq_query\", help=\"Please provide the number of sequences", "strings generated by browser/galaxy needs to be replaced mapped_chars = { '>' :'__gt__',", "= open(info_file, \"a\") # Now retrieve the FASTA sequences in batches of BATCH_SIZE", "key) email = email.replace(value, key) Entrez.email = email # Open info_file for writing", "fetch (-m EMAIL)\" return - 1 if not options.database: print \"Please 
specify the", "web_env.\") parser.add_option(\"-q\", \"--query_key\", dest=\"query_key\", help=\"Please provide the previous query_key.\") parser.add_option(\"-s\", \"--start_pos\", dest=\"start_pos\", help=\"Please", "% (start+1, end) try: fetch_handle = Entrez.efetch(db=database, rettype=\"fasta\", retstart=start, retmax=BATCH_SIZE, webenv=web_env, query_key=query_key) data", "browser/galaxy needs to be replaced mapped_chars = { '>' :'__gt__', '<' :'__lt__', '\\''", "%i to %i\\n' % (start+1, end)) info_file_fd.close() fasta_file_fd.close() else: # Continue a previous", "to provide this when doing an Entrez search or fetch (-m EMAIL)\" return", "\"w\") info_file_fd.write('Email address: %s\\n' % Entrez.email) info_file_fd.write('Database: %s\\n' % database) info_file_fd.write('Entrez query: %s\\n'", "sequence %i to %i\\n' % (start+1, end)) info_file_fd.close() fasta_file_fd.close() if __name__ == \"__main__\":", "from. E.g. the position where the previous download failed.\") parser.add_option(\"-n\", \"--nr_seq_query\", dest=\"nr_seq_query\", help=\"Please", "assert count == len(gi_list) # do not do this test gi_list it is", "not options.database: print \"Please specify the database to fetch info from (-d DATABASE)\"", "% (start+1, end)) info_file_fd.close() fasta_file_fd.close() else: # Continue a previous download email =", "-i download_info.txt # to continue a previous download # entrez_download_fasta.py -m <EMAIL> -d", "now!\") gi_list = results[\"IdList\"] nr_seq_query = int(results[\"Count\"]) # assert count == len(gi_list) #", "-m <EMAIL> -d nucleotide -c -w \"NCID_1_38065753_192.168.127.12_9001_1300878409_78339627\" -q 1 -s 11 -n 38", "(-e ENTREZ_QUERY)\" return - 3 if not options.fasta_file: print \"Please specify the FASTA", "specify the FASTA output file (-f FASTA_FILE)\" return - 4 if not options.info_file:", "a previous download email = options.email database = options.database web_env = options.web_env query_key", "batches of %d\\n' % BATCH_SIZE) 
info_file_fd.close() # Now retrieve the FASTA sequences in", "fasta_file_fd = open(fasta_file, \"w\") for start in range(start_pos - 1,nr_seq_query, BATCH_SIZE): end =", "options.email database = options.database entrez_query = options.entrez_query fasta_file = options.fasta_file info_file = options.info_file", "sequences in batches of BATCH_SIZE fasta_file_fd = open(fasta_file, \"w\") for start in range(start_pos", "6 if not options.database: print \"Please specify the database to fetch info from", "do not do this test gi_list it is always 20 # Get web_env", "E.g. the position where the previous download failed.\") parser.add_option(\"-n\", \"--nr_seq_query\", dest=\"nr_seq_query\", help=\"Please provide", "file (-i INFO_FILE)\" return - 5 # Need to to some checking on", "= options.query_key start_pos = int(options.start_pos); # should check if start position is a", "\"--web_env\", dest=\"web_env\", help=\"Please provide the previous web_env.\") parser.add_option(\"-q\", \"--query_key\", dest=\"query_key\", help=\"Please provide the", "38 -f out_continue.fasta -i info_continue.txt import sys import os import string import re", "file\") parser.add_option(\"-i\", \"--info_file\", dest=\"info_file\", help=\"Information related to the download. 
Contains the web_env and", "= query_key.replace(value, key) Entrez.email = email # Open info_file for writing info_file_fd =", "file (-f FASTA_FILE)\" return - 12 if not options.info_file: print \"Please specify the", "'>' :'__gt__', '<' :'__lt__', '\\'' :'__sq__', '\"' :'__dq__', '[' :'__ob__', ']' :'__cb__', '{'", "query_key=query_key) data = fetch_handle.read() except Exception, e: info_file_fd = open(info_file, \"a\") info_file_fd.write( \"Error", "Exiting now!\") gi_list = results[\"IdList\"] nr_seq_query = int(results[\"Count\"]) # assert count == len(gi_list)", "'[' :'__ob__', ']' :'__cb__', '{' :'__oc__', '}' :'__cc__', '@' :'__at__', } # Start", "file (-i INFO_FILE)\" return - 13 if (len(args) > 0): print \"Too many", "output file\") parser.add_option(\"-i\", \"--info_file\", dest=\"info_file\", help=\"Information related to the download. Contains the web_env", "} # Start a new download if(not options.continue_download): email = options.email database =", "results[\"QueryKey\"] # Write query specific info to info_file info_file_fd.write('Number of sequences in query:", "in range(start_pos - 1,nr_seq_query, BATCH_SIZE): end = min(nr_seq_query, start + BATCH_SIZE) print \"Dowloading", "do this test gi_list it is always 20 # Get web_env and query_key", "= 100 # Input strings generated by browser/galaxy needs to be replaced mapped_chars", "specify the previous web_env (-w WEB_ENV)\" return - 8 if not options.query_key: print", "Now retrieve the FASTA sequences in batches of 5 fasta_file_fd = open(fasta_file, \"w\")", "except Exception, e: info_file_fd = open(info_file, \"a\") info_file_fd.write( \"Error raised when trying do", "NR_SEQ_QUERY)\" return - 11 if not options.fasta_file: print \"Please specify the FASTA output", "previous query_key (-q QUERY_KEY)\" return - 9 if not options.start_pos: print \"Please specify", "for key, value in mapped_chars.items(): database = database.replace(value, key) email = email.replace(value, key)", "email.replace(value, 
key) web_env = web_env.replace(value, key) query_key = query_key.replace(value, key) Entrez.email = email", "1 -s 11 -n 38 -f out_continue.fasta -i info_continue.txt import sys import os", "print \"Please specify the previous web_env (-w WEB_ENV)\" return - 8 if not", "query specific info to info_file info_file_fd.write('Number of sequences in original query: %d\\n' %", "options.info_file for key, value in mapped_chars.items(): database = database.replace(value, key) entrez_query = entrez_query.replace(value,", "the previous web_env (-w WEB_ENV)\" return - 8 if not options.query_key: print \"Please", "and query_key from search results web_env = results[\"WebEnv\"] query_key = results[\"QueryKey\"] # Write", "dest=\"nr_seq_query\", help=\"Please provide the number of sequences found in the original query.\") parser.add_option(\"-f\",", "download info file (-i INFO_FILE)\" return - 13 if (len(args) > 0): print", "Write query specific info to info_file info_file_fd.write('Number of sequences in original query: %d\\n'", "the original query.\") parser.add_option(\"-f\", \"--fasta_file\", dest=\"fasta_file\", help=\"FASTA output file\") parser.add_option(\"-i\", \"--info_file\", dest=\"info_file\", help=\"Information", "except Exception, e: info_file_fd.write( \"Error raised when trying do an Entrez.esearch: %s\\n\" %", "arguments\" return - 14 BATCH_SIZE = 100 # Input strings generated by browser/galaxy", "the database to fetch info from (-d DATABASE)\" return - 7 if not", "-f out.fasta -i download_info.txt # to continue a previous download # entrez_download_fasta.py -m", "database = options.database entrez_query = options.entrez_query fasta_file = options.fasta_file info_file = options.info_file for", "\"a\") info_file_fd.write( \"Error raised when trying do an Entrez.efind: %s\\n\" % str(e)) info_file_fd.write(", "print \"Start a new download...\" if not options.email: print \"Please specify an email", "the FASTA output file (-f FASTA_FILE)\" return - 4 if not 
options.info_file: print", "dowloaded: %d\\n' % nr_seq_query) info_file_fd.write('Download sequences from position: %d\\n' % 0) info_file_fd.write('web_env: %s\\n'", "nucleotide -e \"Cypripedioideae[Orgn] AND matK[Gene]\" -f out.fasta -i download_info.txt # to continue a", "# to start a new download # entrez_download_fasta.py -m <EMAIL> -d nucleotide -e", "fetch (-m EMAIL)\" return - 6 if not options.database: print \"Please specify the", "return - 1 if not options.database: print \"Please specify the database to fetch", "info_file_fd.close() # Now retrieve the FASTA sequences in batches of 5 fasta_file_fd =", "downloading from. E.g. the position where the previous download failed.\") parser.add_option(\"-n\", \"--nr_seq_query\", dest=\"nr_seq_query\",", "\"Dowloading sequence %i to %i\" % (start+1, end) try: fetch_handle = Entrez.efetch(db=database, rettype=\"fasta\",", ":'__sq__', '\"' :'__dq__', '[' :'__ob__', ']' :'__cb__', '{' :'__oc__', '}' :'__cc__', '@' :'__at__',", "the number of sequences found in the original query.\") parser.add_option(\"-f\", \"--fasta_file\", dest=\"fasta_file\", help=\"FASTA", "= email.replace(value, key) web_env = web_env.replace(value, key) query_key = query_key.replace(value, key) Entrez.email =", "<gh_stars>0 #!/usr/bin/python # Retrieve FASTA sequence from NCBI based on an Entrez query.", "-w \"NCID_1_38065753_192.168.127.12_9001_1300878409_78339627\" -q 1 -s 11 -n 38 -f out_continue.fasta -i info_continue.txt import", "DATABASE -e ENTREZ_QUERY -c -w WEB_ENV -q QUERY_KEY -s START_POS -n NR_SEQ_QUERY -f", "7 if not options.web_env: print \"Please specify the previous web_env (-w WEB_ENV)\" return", "WEB_ENV)\" return - 8 if not options.query_key: print \"Please specify the previous query_key", "open(info_file, \"a\") info_file_fd.write( \"Error raised when trying do an Entrez.efind: %s\\n\" % str(e))", "if not options.query_key: print \"Please specify the previous query_key (-q QUERY_KEY)\" return -", "an Entrez.efind: %s\\n\" % 
str(e)) info_file_fd.close() sys.exit( \"Error raised! Exiting now!\") fetch_handle.close() fasta_file_fd.write(data)", "raised! Exiting now!\") fetch_handle.close() fasta_file_fd.write(data) info_file_fd = open(info_file, \"a\") info_file_fd.write('Downloaded sequence %i to", "Entrez.esearch: %s\\n\" % str(e)) info_file_fd.close() sys.exit( \"Error raised! Exiting now!\") gi_list = results[\"IdList\"]", "fasta_file_fd.close() else: # Continue a previous download email = options.email database = options.database", "to some checking on the on the length of the arguments provided. Currently", "open(info_file, \"a\") info_file_fd.write('Downloaded sequence %i to %i\\n' % (start+1, end)) info_file_fd.close() fasta_file_fd.close() if", "ENTREZ_QUERY -c -w WEB_ENV -q QUERY_KEY -s START_POS -n NR_SEQ_QUERY -f FASTA_FILE -i", "query (-n NR_SEQ_QUERY)\" return - 11 if not options.fasta_file: print \"Please specify the", "Entrez query. # to start a new download # entrez_download_fasta.py -m <EMAIL> -d", "> 0): print \"Too many arguments\" return - 14 BATCH_SIZE = 100 #", "sequence %i to %i\\n' % (start+1, end)) info_file_fd.close() fasta_file_fd.close() else: # Continue a", "be dowloaded: %d\\n' % nr_seq_query) info_file_fd.write('Download sequences from position: %d\\n' % 0) info_file_fd.write('web_env:", "-w WEB_ENV -q QUERY_KEY -s START_POS -n NR_SEQ_QUERY -f FASTA_FILE -i INFO_FILE\" parser", "if not options.continue_download: print \"Start a new download...\" if not options.email: print \"Please", "by browser/galaxy needs to be replaced mapped_chars = { '>' :'__gt__', '<' :'__lt__',", "specify the FASTA output file (-f FASTA_FILE)\" return - 12 if not options.info_file:", "%s\\n\" % str(e)) info_file_fd.write( \"Retrying...\") info_file_fd.close() # sys.exit( \"Error raised! 
Exiting now!\") #", "info_file_fd.write('Database: %s\\n' % database) info_file_fd.write('Entrez query: %s\\n' % entrez_query) try: handle = Entrez.esearch(db=database,term=entrez_query,", "entrez query (-e ENTREZ_QUERY)\" return - 3 if not options.fasta_file: print \"Please specify", "%s\\n' % web_env) info_file_fd.write('query_key: %s\\n'% query_key) info_file_fd.write('Downloading sequences in batches of %d\\n' %", "# should check if nr_seq_query is a integer fasta_file = options.fasta_file info_file =", "query_key = query_key.replace(value, key) Entrez.email = email # Open info_file for writing info_file_fd", "address: %s\\n' % Entrez.email) info_file_fd.write('Database: %s\\n' % database) info_file_fd.write('Entrez query: %s\\n' % entrez_query)", "-n NR_SEQ_QUERY -f FASTA_FILE -i INFO_FILE\" parser = OptionParser(usage=usage) parser.add_option(\"-m\", \"--email\", dest=\"email\", help=\"Email", "Entrez search or fetch (-m EMAIL)\" return - 6 if not options.database: print", "sequences from position: %d\\n' % 0) info_file_fd.write('web_env: %s\\n' % web_env) info_file_fd.write('query_key: %s\\n'% query_key)", "%d\\n' % start_pos) info_file_fd.write('web_env: %s\\n' % web_env) info_file_fd.write('query_key: %s\\n' % query_key) info_file_fd.write('Downloading sequences", "do an Entrez.efind: %s\\n\" % str(e)) info_file_fd.write( \"Retrying...\") info_file_fd.close() # sys.exit( \"Error raised!", "Entrez.efind: %s\\n\" % str(e)) info_file_fd.close() sys.exit( \"Error raised! Exiting now!\") fetch_handle.close() fasta_file_fd.write(data) info_file_fd", "BATCH_SIZE) info_file_fd.close() info_file_fd = open(info_file, \"a\") # Now retrieve the FASTA sequences in", "info file (-i INFO_FILE)\" return - 5 # Need to to some checking", "print \"Dowloading sequence %i to %i\" % (start+1, end) try: fetch_handle = Entrez.efetch(db=database,", "some checking on the on the length of the arguments provided. 
Currently not", "# Now retrieve the FASTA sequences in batches of BATCH_SIZE fasta_file_fd = open(fasta_file,", "dest=\"continue_download\", default=False, help=\"If flag is specified program will continue a previous download. User", "(2) Continue previous download. Need to handle these separately. Sort it out later.", "e: info_file_fd = open(info_file, \"a\") info_file_fd.write( \"Error raised when trying do an Entrez.efind:", "-d DATABASE -e ENTREZ_QUERY -c -w WEB_ENV -q QUERY_KEY -s START_POS -n NR_SEQ_QUERY", "Exception, e: info_file_fd = open(info_file, \"a\") info_file_fd.write( \"Error raised when trying do an", "key) query_key = query_key.replace(value, key) Entrez.email = email # Open info_file for writing", "= parser.parse_args() if not options.continue_download: print \"Start a new download...\" if not options.email:", "main(): usage = \"usage: %prog -m EMAIL -d DATABASE -e ENTREZ_QUERY -c -w", "\\\"Archaea\\\"[Organism] OR prokaryotes[All Fields] not \\\"Escherichia coli\\\"[Organism]\") parser.add_option(\"-c\", \"--continue_download\", action=\"store_true\", dest=\"continue_download\", default=False, help=\"If", "previous download email = options.email database = options.database web_env = options.web_env query_key =", "Entrez search and fetch\") parser.add_option(\"-d\", \"--database\", dest=\"database\", help=\"Database e.g. nucleotide\") parser.add_option(\"-e\", \"--entrez_query\", dest=\"entrez_query\",", "prokaryotes[All Fields] not \\\"Escherichia coli\\\"[Organism]\") parser.add_option(\"-c\", \"--continue_download\", action=\"store_true\", dest=\"continue_download\", default=False, help=\"If flag is", "print \"Please specify the FASTA output file (-f FASTA_FILE)\" return - 12 if", "do an Entrez.esearch: %s\\n\" % str(e)) info_file_fd.close() sys.exit( \"Error raised! 
Exiting now!\") gi_list", "the FASTA sequences in batches of BATCH_SIZE fasta_file_fd = open(fasta_file, \"w\") for start", "\"--start_pos\", dest=\"start_pos\", help=\"Please provide position of the sequence to start downloading from. E.g.", "\"a\") # Now retrieve the FASTA sequences in batches of BATCH_SIZE fasta_file_fd =", "range(start_pos - 1,nr_seq_query, BATCH_SIZE): end = min(nr_seq_query, start + BATCH_SIZE) print \"Dowloading sequence", "options.info_file: print \"Please specify the download info file (-i INFO_FILE)\" return - 5", "-e \"Cypripedioideae[Orgn] AND matK[Gene]\" -f out.fasta -i download_info.txt # to continue a previous", "nucleotide\") parser.add_option(\"-e\", \"--entrez_query\", dest=\"entrez_query\", help=\"Entrez query e.g. \\\"Bacteria\\\"[Organism] OR \\\"Archaea\\\"[Organism] OR prokaryotes[All Fields]", "based on an Entrez query. # to start a new download # entrez_download_fasta.py", "-s 11 -n 38 -f out_continue.fasta -i info_continue.txt import sys import os import", "specify an email address. 
Need to provide this when doing an Entrez search", "new download # entrez_download_fasta.py -m <EMAIL> -d nucleotide -e \"Cypripedioideae[Orgn] AND matK[Gene]\" -f", "\"Please specify the previous web_env (-w WEB_ENV)\" return - 8 if not options.query_key:", "coli\\\"[Organism]\") parser.add_option(\"-c\", \"--continue_download\", action=\"store_true\", dest=\"continue_download\", default=False, help=\"If flag is specified program will continue", "of sequences to be dowloaded: %d\\n' % (int(nr_seq_query) - int(start_pos) + 1)) info_file_fd.write('Download", "+ 1)) info_file_fd.write('Download sequences from position: %d\\n' % start_pos) info_file_fd.write('web_env: %s\\n' % web_env)", "ENTREZ_QUERY)\" return - 3 if not options.fasta_file: print \"Please specify the FASTA output", "QUERY_KEY)\" return - 9 if not options.start_pos: print \"Please specify the position of", "options.database entrez_query = options.entrez_query fasta_file = options.fasta_file info_file = options.info_file for key, value", "an email address. 
Need to provide this when doing an Entrez search or", "EMAIL -d DATABASE -e ENTREZ_QUERY -c -w WEB_ENV -q QUERY_KEY -s START_POS -n", "return - 12 if not options.info_file: print \"Please specify the download info file", "previous query_key.\") parser.add_option(\"-s\", \"--start_pos\", dest=\"start_pos\", help=\"Please provide position of the sequence to start", "= { '>' :'__gt__', '<' :'__lt__', '\\'' :'__sq__', '\"' :'__dq__', '[' :'__ob__', ']'", "to %i\\n' % (start+1, end)) info_file_fd.close() fasta_file_fd.close() else: # Continue a previous download", "specify an entrez query (-e ENTREZ_QUERY)\" return - 3 if not options.fasta_file: print", "usehistory='y') results = Entrez.read(handle) handle.close() except Exception, e: info_file_fd.write( \"Error raised when trying", "- 13 if (len(args) > 0): print \"Too many arguments\" return - 14", "-f out_continue.fasta -i info_continue.txt import sys import os import string import re from", "Fields] not \\\"Escherichia coli\\\"[Organism]\") parser.add_option(\"-c\", \"--continue_download\", action=\"store_true\", dest=\"continue_download\", default=False, help=\"If flag is specified", "\"usage: %prog -m EMAIL -d DATABASE -e ENTREZ_QUERY -c -w WEB_ENV -q QUERY_KEY", "Need to handle these separately. Sort it out later. Not to crucial. else:", "sequences in batches of 5 fasta_file_fd = open(fasta_file, \"w\") for start in range(0,nr_seq_query,", "needs to be replaced mapped_chars = { '>' :'__gt__', '<' :'__lt__', '\\'' :'__sq__',", "email = email.replace(value, key) Entrez.email = email # Open info_file for writing info_file_fd", "fasta_file_fd.write(data) info_file_fd = open(info_file, \"a\") info_file_fd.write('Downloaded sequence %i to %i\\n' % (start+1, end))", "arguments provided. Currently not working because we have 2 options # (1) Start", "options.email: print \"Please specify an email address. 
Need to provide this when doing", "print \"Please specify the previous query_key (-q QUERY_KEY)\" return - 9 if not", "to handle these separately. Sort it out later. Not to crucial. else: print", "Input strings generated by browser/galaxy needs to be replaced mapped_chars = { '>'", "help=\"Entrez query e.g. \\\"Bacteria\\\"[Organism] OR \\\"Archaea\\\"[Organism] OR prokaryotes[All Fields] not \\\"Escherichia coli\\\"[Organism]\") parser.add_option(\"-c\",", "help=\"If flag is specified program will continue a previous download. User need to", "\"Continue a previous download...\" if not options.email: print \"Please specify an email address.", "Contains the web_env and query_key for the search.\") (options, args) = parser.parse_args() if", "entrez_query = entrez_query.replace(value, key) email = email.replace(value, key) Entrez.email = email # Open", "%d\\n' % nr_seq_query) info_file_fd.write('Number of sequences to be dowloaded: %d\\n' % (int(nr_seq_query) -", "-m <EMAIL> -d nucleotide -e \"Cypripedioideae[Orgn] AND matK[Gene]\" -f out.fasta -i download_info.txt #", "web_env (-w WEB_ENV)\" return - 8 if not options.query_key: print \"Please specify the", "nr_seq_query) info_file_fd.write('Number of sequences to be dowloaded: %d\\n' % nr_seq_query) info_file_fd.write('Download sequences from", "string import re from optparse import OptionParser from Bio import Entrez def main():", "handle these separately. Sort it out later. Not to crucial. else: print \"Continue", "database = database.replace(value, key) email = email.replace(value, key) web_env = web_env.replace(value, key) query_key", "(-m EMAIL)\" return - 6 if not options.database: print \"Please specify the database", "new download...\" if not options.email: print \"Please specify an email address. 
Need to", "= open(fasta_file, \"w\") for start in range(start_pos - 1,nr_seq_query, BATCH_SIZE): end = min(nr_seq_query,", "str(e)) info_file_fd.write( \"Retrying...\") info_file_fd.close() # sys.exit( \"Error raised! Exiting now!\") # do not", "fetch_handle.close() fasta_file_fd.write(data) info_file_fd = open(info_file, \"a\") info_file_fd.write('Downloaded sequence %i to %i\\n' % (start+1,", "database) info_file_fd.write('Entrez query: %s\\n' % entrez_query) try: handle = Entrez.esearch(db=database,term=entrez_query, usehistory='y') results =", "mapped_chars.items(): database = database.replace(value, key) email = email.replace(value, key) web_env = web_env.replace(value, key)", "to %i\" % (start+1, end) try: fetch_handle = Entrez.efetch(db=database, rettype=\"fasta\", retstart=start, retmax=BATCH_SIZE, webenv=web_env,", "Now retrieve the FASTA sequences in batches of BATCH_SIZE fasta_file_fd = open(fasta_file, \"w\")", "FASTA output file (-f FASTA_FILE)\" return - 4 if not options.info_file: print \"Please", "- 12 if not options.info_file: print \"Please specify the download info file (-i", "not options.start_pos: print \"Please specify the position of the sequence to start downloading", "in batches of BATCH_SIZE fasta_file_fd = open(fasta_file, \"w\") for start in range(start_pos -", "(-n NR_SEQ_QUERY)\" return - 11 if not options.fasta_file: print \"Please specify the FASTA", "# Write query specific info to info_file info_file_fd.write('Number of sequences in original query:", "not options.fasta_file: print \"Please specify the FASTA output file (-f FASTA_FILE)\" return -", "of sequences to be dowloaded: %d\\n' % nr_seq_query) info_file_fd.write('Download sequences from position: %d\\n'", "info_file_fd.close() sys.exit( \"Error raised! 
Exiting now!\") fetch_handle.close() fasta_file_fd.write(data) info_file_fd = open(info_file, \"a\") info_file_fd.write('Downloaded", "= options.info_file for key, value in mapped_chars.items(): database = database.replace(value, key) email =", "(start+1, end)) info_file_fd.close() fasta_file_fd.close() else: # Continue a previous download email = options.email", "web_env) info_file_fd.write('query_key: %s\\n'% query_key) info_file_fd.write('Downloading sequences in batches of %d\\n' % BATCH_SIZE) info_file_fd.close()", "for start in range(0,nr_seq_query, BATCH_SIZE): end = min(nr_seq_query, start + BATCH_SIZE) print \"Dowloading", ":'__ob__', ']' :'__cb__', '{' :'__oc__', '}' :'__cc__', '@' :'__at__', } # Start a", "to crucial. else: print \"Continue a previous download...\" if not options.email: print \"Please", "and query_key for the search.\") (options, args) = parser.parse_args() if not options.continue_download: print", "info_file = options.info_file for key, value in mapped_chars.items(): database = database.replace(value, key) email", "1,nr_seq_query, BATCH_SIZE): end = min(nr_seq_query, start + BATCH_SIZE) print \"Dowloading sequence %i to", "return - 5 # Need to to some checking on the on the", "sequences in query: %d\\n' % nr_seq_query) info_file_fd.write('Number of sequences to be dowloaded: %d\\n'", "previous download. 
User need to provide the WEB_ENV, QUERY_KEY and SEQ_START\") parser.add_option(\"-w\", \"--web_env\",", "import sys import os import string import re from optparse import OptionParser from", "import re from optparse import OptionParser from Bio import Entrez def main(): usage", "- 5 # Need to to some checking on the on the length", "info_file_fd.write('Entrez query: %s\\n' % entrez_query) try: handle = Entrez.esearch(db=database,term=entrez_query, usehistory='y') results = Entrez.read(handle)", "# should check if start position is a integer nr_seq_query = int(options.nr_seq_query); #", "= web_env.replace(value, key) query_key = query_key.replace(value, key) Entrez.email = email # Open info_file", "int(options.nr_seq_query); # should check if nr_seq_query is a integer fasta_file = options.fasta_file info_file", "position where the previous download failed.\") parser.add_option(\"-n\", \"--nr_seq_query\", dest=\"nr_seq_query\", help=\"Please provide the number", "%s\\n' % entrez_query) try: handle = Entrez.esearch(db=database,term=entrez_query, usehistory='y') results = Entrez.read(handle) handle.close() except", "fasta_file = options.fasta_file info_file = options.info_file for key, value in mapped_chars.items(): database =", "position is a integer nr_seq_query = int(options.nr_seq_query); # should check if nr_seq_query is", "-e ENTREZ_QUERY -c -w WEB_ENV -q QUERY_KEY -s START_POS -n NR_SEQ_QUERY -f FASTA_FILE", "\"a\") info_file_fd.write( \"Error raised when trying do an Entrez.efind: %s\\n\" % str(e)) info_file_fd.close()", "- 2 if not options.entrez_query: print \"Please specify an entrez query (-e ENTREZ_QUERY)\"", "not options.entrez_query: print \"Please specify an entrez query (-e ENTREZ_QUERY)\" return - 3", "def main(): usage = \"usage: %prog -m EMAIL -d DATABASE -e ENTREZ_QUERY -c", "checking on the on the length of the arguments provided. 
Currently not working", "if not options.database: print \"Please specify the database to fetch info from (-d", "-f FASTA_FILE -i INFO_FILE\" parser = OptionParser(usage=usage) parser.add_option(\"-m\", \"--email\", dest=\"email\", help=\"Email address. Need", "info_file_fd.close() sys.exit( \"Error raised! Exiting now!\") gi_list = results[\"IdList\"] nr_seq_query = int(results[\"Count\"]) #", "key) web_env = web_env.replace(value, key) query_key = query_key.replace(value, key) Entrez.email = email #", "\"Too many arguments\" return - 14 BATCH_SIZE = 100 # Input strings generated", "sys.exit( \"Error raised! Exiting now!\") # do not exit anymore fetch_handle.close() fasta_file_fd.write(data) info_file_fd", "Retrieve FASTA sequence from NCBI based on an Entrez query. # to start", "from (-d DATABASE)\" return - 2 if not options.entrez_query: print \"Please specify an", "download and (2) Continue previous download. Need to handle these separately. Sort it", "to be replaced mapped_chars = { '>' :'__gt__', '<' :'__lt__', '\\'' :'__sq__', '\"'", "= open(info_file, \"a\") info_file_fd.write( \"Error raised when trying do an Entrez.efind: %s\\n\" %", "%s\\n' % database) info_file_fd.write('Entrez query: %s\\n' % entrez_query) try: handle = Entrez.esearch(db=database,term=entrez_query, usehistory='y')", "of %d\\n' % BATCH_SIZE) info_file_fd.close() # Now retrieve the FASTA sequences in batches", "'\"' :'__dq__', '[' :'__ob__', ']' :'__cb__', '{' :'__oc__', '}' :'__cc__', '@' :'__at__', }", "an Entrez.esearch: %s\\n\" % str(e)) info_file_fd.close() sys.exit( \"Error raised! Exiting now!\") gi_list =", "'\\'' :'__sq__', '\"' :'__dq__', '[' :'__ob__', ']' :'__cb__', '{' :'__oc__', '}' :'__cc__', '@'", "email.replace(value, key) Entrez.email = email # Open info_file for writing info_file_fd = open(info_file,", "is specified program will continue a previous download. 
User need to provide the", "= email # Open info_file for writing info_file_fd = open(info_file, \"w\") info_file_fd.write('Email address:", "-i INFO_FILE\" parser = OptionParser(usage=usage) parser.add_option(\"-m\", \"--email\", dest=\"email\", help=\"Email address. Need to provide", "download failed.\") parser.add_option(\"-n\", \"--nr_seq_query\", dest=\"nr_seq_query\", help=\"Please provide the number of sequences found in", "\"w\") info_file_fd.write('Email address: %s\\n' % Entrez.email) info_file_fd.write('Database: %s\\n' % database) # Write query", "program will continue a previous download. User need to provide the WEB_ENV, QUERY_KEY", "# Start a new download if(not options.continue_download): email = options.email database = options.database", "-c -w WEB_ENV -q QUERY_KEY -s START_POS -n NR_SEQ_QUERY -f FASTA_FILE -i INFO_FILE\"", "options.nr_seq_query: print \"Please specify the number of sequences in original query (-n NR_SEQ_QUERY)\"", "parser.add_option(\"-w\", \"--web_env\", dest=\"web_env\", help=\"Please provide the previous web_env.\") parser.add_option(\"-q\", \"--query_key\", dest=\"query_key\", help=\"Please provide", "\"Please specify the database to fetch info from (-d DATABASE)\" return - 2", "import Entrez def main(): usage = \"usage: %prog -m EMAIL -d DATABASE -e", "specify the download info file (-i INFO_FILE)\" return - 5 # Need to", ":'__oc__', '}' :'__cc__', '@' :'__at__', } # Start a new download if(not options.continue_download):", "(-s START_POS)\" return - 10 if not options.nr_seq_query: print \"Please specify the number", "(start+1, end) try: fetch_handle = Entrez.efetch(db=database, rettype=\"fasta\", retstart=start, retmax=BATCH_SIZE, webenv=web_env, query_key=query_key) data =", "to fetch info from (-d DATABASE)\" return - 2 if not options.entrez_query: print", "(-d DATABASE)\" return - 2 if not options.entrez_query: print \"Please specify an entrez", "(-q QUERY_KEY)\" return - 9 if not options.start_pos: print \"Please specify 
the position", "QUERY_KEY -s START_POS -n NR_SEQ_QUERY -f FASTA_FILE -i INFO_FILE\" parser = OptionParser(usage=usage) parser.add_option(\"-m\",", "from NCBI based on an Entrez query. # to start a new download", "# entrez_download_fasta.py -m <EMAIL> -d nucleotide -c -w \"NCID_1_38065753_192.168.127.12_9001_1300878409_78339627\" -q 1 -s 11", "11 -n 38 -f out_continue.fasta -i info_continue.txt import sys import os import string", "SEQ_START\") parser.add_option(\"-w\", \"--web_env\", dest=\"web_env\", help=\"Please provide the previous web_env.\") parser.add_option(\"-q\", \"--query_key\", dest=\"query_key\", help=\"Please", "- 9 if not options.start_pos: print \"Please specify the position of the sequence", "on the length of the arguments provided. Currently not working because we have", "info from (-d DATABASE)\" return - 7 if not options.web_env: print \"Please specify", "options.fasta_file info_file = options.info_file for key, value in mapped_chars.items(): database = database.replace(value, key)", "parser.add_option(\"-e\", \"--entrez_query\", dest=\"entrez_query\", help=\"Entrez query e.g. \\\"Bacteria\\\"[Organism] OR \\\"Archaea\\\"[Organism] OR prokaryotes[All Fields] not", "Need to provide this when doing an Entrez search or fetch (-m EMAIL)\"", "= results[\"IdList\"] nr_seq_query = int(results[\"Count\"]) # assert count == len(gi_list) # do not", "the download. Contains the web_env and query_key for the search.\") (options, args) =", "query_key (-q QUERY_KEY)\" return - 9 if not options.start_pos: print \"Please specify the", "-d nucleotide -c -w \"NCID_1_38065753_192.168.127.12_9001_1300878409_78339627\" -q 1 -s 11 -n 38 -f out_continue.fasta", "= OptionParser(usage=usage) parser.add_option(\"-m\", \"--email\", dest=\"email\", help=\"Email address. 
Need to provide this when doing", "%i to %i\" % (start+1, end) try: fetch_handle = Entrez.efetch(db=database, rettype=\"fasta\", retstart=start, retmax=BATCH_SIZE,", "open(info_file, \"w\") info_file_fd.write('Email address: %s\\n' % Entrez.email) info_file_fd.write('Database: %s\\n' % database) # Write", "sequence from NCBI based on an Entrez query. # to start a new", "web_env = web_env.replace(value, key) query_key = query_key.replace(value, key) Entrez.email = email # Open", "options.info_file: print \"Please specify the download info file (-i INFO_FILE)\" return - 13", "Sort it out later. Not to crucial. else: print \"Continue a previous download...\"", "fetch\") parser.add_option(\"-d\", \"--database\", dest=\"database\", help=\"Database e.g. nucleotide\") parser.add_option(\"-e\", \"--entrez_query\", dest=\"entrez_query\", help=\"Entrez query e.g.", "web_env and query_key from search results web_env = results[\"WebEnv\"] query_key = results[\"QueryKey\"] #", "parser.add_option(\"-s\", \"--start_pos\", dest=\"start_pos\", help=\"Please provide position of the sequence to start downloading from.", "if not options.email: print \"Please specify an email address. 
Need to provide this", "info_file info_file_fd.write('Number of sequences in query: %d\\n' % nr_seq_query) info_file_fd.write('Number of sequences to", "info_file_fd.write('query_key: %s\\n' % query_key) info_file_fd.write('Downloading sequences in batches of %d\\n' % BATCH_SIZE) info_file_fd.close()", "from (-s START_POS)\" return - 10 if not options.nr_seq_query: print \"Please specify the", "usage = \"usage: %prog -m EMAIL -d DATABASE -e ENTREZ_QUERY -c -w WEB_ENV", "new download if(not options.continue_download): email = options.email database = options.database entrez_query = options.entrez_query", "the previous web_env.\") parser.add_option(\"-q\", \"--query_key\", dest=\"query_key\", help=\"Please provide the previous query_key.\") parser.add_option(\"-s\", \"--start_pos\",", "help=\"Information related to the download. Contains the web_env and query_key for the search.\")", "results[\"IdList\"] nr_seq_query = int(results[\"Count\"]) # assert count == len(gi_list) # do not do", "= options.database entrez_query = options.entrez_query fasta_file = options.fasta_file info_file = options.info_file for key,", "import OptionParser from Bio import Entrez def main(): usage = \"usage: %prog -m", "position of the sequence to start downloading from (-s START_POS)\" return - 10", "specify the database to fetch info from (-d DATABASE)\" return - 2 if", "INFO_FILE\" parser = OptionParser(usage=usage) parser.add_option(\"-m\", \"--email\", dest=\"email\", help=\"Email address. Need to provide this", "it out later. Not to crucial. 
else: print \"Continue a previous download...\" if", "options.continue_download): email = options.email database = options.database entrez_query = options.entrez_query fasta_file = options.fasta_file", "to be dowloaded: %d\\n' % nr_seq_query) info_file_fd.write('Download sequences from position: %d\\n' % 0)", "\"Please specify the download info file (-i INFO_FILE)\" return - 5 # Need", "gi_list = results[\"IdList\"] nr_seq_query = int(results[\"Count\"]) # assert count == len(gi_list) # do", "open(fasta_file, \"w\") for start in range(start_pos - 1,nr_seq_query, BATCH_SIZE): end = min(nr_seq_query, start", "info_file_fd.write( \"Retrying...\") info_file_fd.close() # sys.exit( \"Error raised! Exiting now!\") # do not exit", "info_file_fd.write('Database: %s\\n' % database) # Write query specific info to info_file info_file_fd.write('Number of", "specify the position of the sequence to start downloading from (-s START_POS)\" return", "% nr_seq_query) info_file_fd.write('Download sequences from position: %d\\n' % 0) info_file_fd.write('web_env: %s\\n' % web_env)", "-n 38 -f out_continue.fasta -i info_continue.txt import sys import os import string import", "% str(e)) info_file_fd.close() sys.exit( \"Error raised! 
Exiting now!\") fetch_handle.close() fasta_file_fd.write(data) info_file_fd = open(info_file,", "-q 1 -s 11 -n 38 -f out_continue.fasta -i info_continue.txt import sys import", "# Write query specific info to info_file info_file_fd.write('Number of sequences in query: %d\\n'", "-d nucleotide -e \"Cypripedioideae[Orgn] AND matK[Gene]\" -f out.fasta -i download_info.txt # to continue", "a previous download # entrez_download_fasta.py -m <EMAIL> -d nucleotide -c -w \"NCID_1_38065753_192.168.127.12_9001_1300878409_78339627\" -q", "nr_seq_query) info_file_fd.write('Number of sequences to be dowloaded: %d\\n' % (int(nr_seq_query) - int(start_pos) +", "% nr_seq_query) info_file_fd.write('Number of sequences to be dowloaded: %d\\n' % nr_seq_query) info_file_fd.write('Download sequences", "info_file_fd.write( \"Error raised when trying do an Entrez.efind: %s\\n\" % str(e)) info_file_fd.write( \"Retrying...\")", "output file (-f FASTA_FILE)\" return - 12 if not options.info_file: print \"Please specify", "should check if nr_seq_query is a integer fasta_file = options.fasta_file info_file = options.info_file", "2 options # (1) Start new download and (2) Continue previous download. Need", "to start downloading from. E.g. 
the position where the previous download failed.\") parser.add_option(\"-n\",", "the search.\") (options, args) = parser.parse_args() if not options.continue_download: print \"Start a new", "query: %s\\n' % entrez_query) try: handle = Entrez.esearch(db=database,term=entrez_query, usehistory='y') results = Entrez.read(handle) handle.close()", "\\\"Bacteria\\\"[Organism] OR \\\"Archaea\\\"[Organism] OR prokaryotes[All Fields] not \\\"Escherichia coli\\\"[Organism]\") parser.add_option(\"-c\", \"--continue_download\", action=\"store_true\", dest=\"continue_download\",", "info_file_fd.write( \"Error raised when trying do an Entrez.efind: %s\\n\" % str(e)) info_file_fd.close() sys.exit(", "out_continue.fasta -i info_continue.txt import sys import os import string import re from optparse", "% str(e)) info_file_fd.close() sys.exit( \"Error raised! Exiting now!\") gi_list = results[\"IdList\"] nr_seq_query =", "not do this test gi_list it is always 20 # Get web_env and", "query: %d\\n' % nr_seq_query) info_file_fd.write('Number of sequences to be dowloaded: %d\\n' % nr_seq_query)", "query_key for the search.\") (options, args) = parser.parse_args() if not options.continue_download: print \"Start", "Exception, e: info_file_fd.write( \"Error raised when trying do an Entrez.esearch: %s\\n\" % str(e))", "info_file_fd = open(info_file, \"a\") info_file_fd.write( \"Error raised when trying do an Entrez.efind: %s\\n\"", "'{' :'__oc__', '}' :'__cc__', '@' :'__at__', } # Start a new download if(not", "%i\" % (start+1, end) try: fetch_handle = Entrez.efetch(db=database, rettype=\"fasta\", retstart=start, retmax=BATCH_SIZE, webenv=web_env, query_key=query_key)", "query: %d\\n' % nr_seq_query) info_file_fd.write('Number of sequences to be dowloaded: %d\\n' % (int(nr_seq_query)", "Exiting now!\") # do not exit anymore fetch_handle.close() fasta_file_fd.write(data) info_file_fd = open(info_file, \"a\")", "%i to %i\\n' % (start+1, end)) info_file_fd.close() fasta_file_fd.close() if 
__name__ == \"__main__\": sys.exit(main())", "% database) # Write query specific info to info_file info_file_fd.write('Number of sequences in", "(-f FASTA_FILE)\" return - 4 if not options.info_file: print \"Please specify the download", "# (1) Start new download and (2) Continue previous download. Need to handle", "% nr_seq_query) info_file_fd.write('Number of sequences to be dowloaded: %d\\n' % (int(nr_seq_query) - int(start_pos)", "Get web_env and query_key from search results web_env = results[\"WebEnv\"] query_key = results[\"QueryKey\"]", "= options.web_env query_key = options.query_key start_pos = int(options.start_pos); # should check if start", "% Entrez.email) info_file_fd.write('Database: %s\\n' % database) # Write query specific info to info_file", "info_file_fd = open(info_file, \"w\") info_file_fd.write('Email address: %s\\n' % Entrez.email) info_file_fd.write('Database: %s\\n' % database)", "web_env = options.web_env query_key = options.query_key start_pos = int(options.start_pos); # should check if", "help=\"Database e.g. nucleotide\") parser.add_option(\"-e\", \"--entrez_query\", dest=\"entrez_query\", help=\"Entrez query e.g. \\\"Bacteria\\\"[Organism] OR \\\"Archaea\\\"[Organism] OR", "number of sequences in original query (-n NR_SEQ_QUERY)\" return - 11 if not", "%prog -m EMAIL -d DATABASE -e ENTREZ_QUERY -c -w WEB_ENV -q QUERY_KEY -s", "the previous query_key.\") parser.add_option(\"-s\", \"--start_pos\", dest=\"start_pos\", help=\"Please provide position of the sequence to", "batches of BATCH_SIZE fasta_file_fd = open(fasta_file, \"w\") for start in range(start_pos - 1,nr_seq_query,", "when doing an Entrez search and fetch\") parser.add_option(\"-d\", \"--database\", dest=\"database\", help=\"Database e.g. 
nucleotide\")", "email = email.replace(value, key) web_env = web_env.replace(value, key) query_key = query_key.replace(value, key) Entrez.email", "entrez_download_fasta.py -m <EMAIL> -d nucleotide -e \"Cypripedioideae[Orgn] AND matK[Gene]\" -f out.fasta -i download_info.txt", "and SEQ_START\") parser.add_option(\"-w\", \"--web_env\", dest=\"web_env\", help=\"Please provide the previous web_env.\") parser.add_option(\"-q\", \"--query_key\", dest=\"query_key\",", "options.start_pos: print \"Please specify the position of the sequence to start downloading from", "if not options.start_pos: print \"Please specify the position of the sequence to start", "of BATCH_SIZE fasta_file_fd = open(fasta_file, \"w\") for start in range(start_pos - 1,nr_seq_query, BATCH_SIZE):", "in batches of 5 fasta_file_fd = open(fasta_file, \"w\") for start in range(0,nr_seq_query, BATCH_SIZE):", "nr_seq_query = int(results[\"Count\"]) # assert count == len(gi_list) # do not do this", "options.entrez_query fasta_file = options.fasta_file info_file = options.info_file for key, value in mapped_chars.items(): database", "flag is specified program will continue a previous download. User need to provide", "database = database.replace(value, key) entrez_query = entrez_query.replace(value, key) email = email.replace(value, key) Entrez.email", "# Now retrieve the FASTA sequences in batches of 5 fasta_file_fd = open(fasta_file,", "key) Entrez.email = email # Open info_file for writing info_file_fd = open(info_file, \"w\")", "print \"Please specify the position of the sequence to start downloading from (-s", "(len(args) > 0): print \"Too many arguments\" return - 14 BATCH_SIZE = 100", "doing an Entrez search and fetch\") parser.add_option(\"-d\", \"--database\", dest=\"database\", help=\"Database e.g. 
nucleotide\") parser.add_option(\"-e\",", "info from (-d DATABASE)\" return - 2 if not options.entrez_query: print \"Please specify", "# do not exit anymore fetch_handle.close() fasta_file_fd.write(data) info_file_fd = open(info_file, \"a\") info_file_fd.write('Downloaded sequence", "in batches of %d\\n' % BATCH_SIZE) info_file_fd.close() # Now retrieve the FASTA sequences", "\"Please specify the FASTA output file (-f FASTA_FILE)\" return - 4 if not", "% database) info_file_fd.write('Entrez query: %s\\n' % entrez_query) try: handle = Entrez.esearch(db=database,term=entrez_query, usehistory='y') results", "if nr_seq_query is a integer fasta_file = options.fasta_file info_file = options.info_file for key,", "position of the sequence to start downloading from. E.g. the position where the", "re from optparse import OptionParser from Bio import Entrez def main(): usage =", "parser.add_option(\"-i\", \"--info_file\", dest=\"info_file\", help=\"Information related to the download. Contains the web_env and query_key", "action=\"store_true\", dest=\"continue_download\", default=False, help=\"If flag is specified program will continue a previous download.", "failed.\") parser.add_option(\"-n\", \"--nr_seq_query\", dest=\"nr_seq_query\", help=\"Please provide the number of sequences found in the", "doing an Entrez search or fetch (-m EMAIL)\" return - 6 if not", "specify the number of sequences in original query (-n NR_SEQ_QUERY)\" return - 11", "in mapped_chars.items(): database = database.replace(value, key) entrez_query = entrez_query.replace(value, key) email = email.replace(value,", "\"Error raised when trying do an Entrez.efind: %s\\n\" % str(e)) info_file_fd.close() sys.exit( \"Error", "% BATCH_SIZE) info_file_fd.close() info_file_fd = open(info_file, \"a\") # Now retrieve the FASTA sequences", "info_file_fd = open(info_file, \"a\") # Now retrieve the FASTA sequences in batches of", "options.info_file for key, value in mapped_chars.items(): database = 
database.replace(value, key) email = email.replace(value,", "dest=\"query_key\", help=\"Please provide the previous query_key.\") parser.add_option(\"-s\", \"--start_pos\", dest=\"start_pos\", help=\"Please provide position of", "Entrez search or fetch (-m EMAIL)\" return - 1 if not options.database: print", "do not exit anymore fetch_handle.close() fasta_file_fd.write(data) info_file_fd = open(info_file, \"a\") info_file_fd.write('Downloaded sequence %i", "\"--continue_download\", action=\"store_true\", dest=\"continue_download\", default=False, help=\"If flag is specified program will continue a previous", "%d\\n' % 0) info_file_fd.write('web_env: %s\\n' % web_env) info_file_fd.write('query_key: %s\\n'% query_key) info_file_fd.write('Downloading sequences in", "info to info_file info_file_fd.write('Number of sequences in query: %d\\n' % nr_seq_query) info_file_fd.write('Number of", "not options.continue_download: print \"Start a new download...\" if not options.email: print \"Please specify", "results = Entrez.read(handle) handle.close() except Exception, e: info_file_fd.write( \"Error raised when trying do", "query_key from search results web_env = results[\"WebEnv\"] query_key = results[\"QueryKey\"] # Write query", "= min(nr_seq_query, start + BATCH_SIZE) print \"Dowloading sequence %i to %i\" % (start+1,", "original query (-n NR_SEQ_QUERY)\" return - 11 if not options.fasta_file: print \"Please specify", "(options, args) = parser.parse_args() if not options.continue_download: print \"Start a new download...\" if", "in the original query.\") parser.add_option(\"-f\", \"--fasta_file\", dest=\"fasta_file\", help=\"FASTA output file\") parser.add_option(\"-i\", \"--info_file\", dest=\"info_file\",", "email = options.email database = options.database entrez_query = options.entrez_query fasta_file = options.fasta_file info_file", "dest=\"email\", help=\"Email address. Need to provide this when doing an Entrez search and", "\"Please specify an email address. 
Need to provide this when doing an Entrez", "info_file_fd.write('Email address: %s\\n' % Entrez.email) info_file_fd.write('Database: %s\\n' % database) info_file_fd.write('Entrez query: %s\\n' %", "DATABASE)\" return - 7 if not options.web_env: print \"Please specify the previous web_env" ]
[ ".version import __version__, VERSION, metadataD from .ancillary import ProcessAncillary from .searchjson import SearchJsonTandemX,", "Package belonging to Karttur´s GeoImagine Framework. Author ------ <NAME> (<EMAIL>) \"\"\" from .version", "<NAME> (<EMAIL>) \"\"\" from .version import __version__, VERSION, metadataD from .ancillary import ProcessAncillary", "VERSION, metadataD from .ancillary import ProcessAncillary from .searchjson import SearchJsonTandemX, UnZipJsonTandemX __all__ =", "Karttur´s GeoImagine Framework. Author ------ <NAME> (<EMAIL>) \"\"\" from .version import __version__, VERSION,", "__version__, VERSION, metadataD from .ancillary import ProcessAncillary from .searchjson import SearchJsonTandemX, UnZipJsonTandemX __all__", "\"\"\" from .version import __version__, VERSION, metadataD from .ancillary import ProcessAncillary from .searchjson", "metadataD from .ancillary import ProcessAncillary from .searchjson import SearchJsonTandemX, UnZipJsonTandemX __all__ = ['ProcessAncillary']", "(<EMAIL>) \"\"\" from .version import __version__, VERSION, metadataD from .ancillary import ProcessAncillary from", "to Karttur´s GeoImagine Framework. Author ------ <NAME> (<EMAIL>) \"\"\" from .version import __version__,", "belonging to Karttur´s GeoImagine Framework. Author ------ <NAME> (<EMAIL>) \"\"\" from .version import", "GeoImagine Framework. Author ------ <NAME> (<EMAIL>) \"\"\" from .version import __version__, VERSION, metadataD", "\"\"\" ancillary ========================================== Package belonging to Karttur´s GeoImagine Framework. Author ------ <NAME> (<EMAIL>)", "========================================== Package belonging to Karttur´s GeoImagine Framework. Author ------ <NAME> (<EMAIL>) \"\"\" from", "Framework. Author ------ <NAME> (<EMAIL>) \"\"\" from .version import __version__, VERSION, metadataD from", "ancillary ========================================== Package belonging to Karttur´s GeoImagine Framework. 
Author ------ <NAME> (<EMAIL>) \"\"\"", "Author ------ <NAME> (<EMAIL>) \"\"\" from .version import __version__, VERSION, metadataD from .ancillary", "import __version__, VERSION, metadataD from .ancillary import ProcessAncillary from .searchjson import SearchJsonTandemX, UnZipJsonTandemX", "------ <NAME> (<EMAIL>) \"\"\" from .version import __version__, VERSION, metadataD from .ancillary import", "from .version import __version__, VERSION, metadataD from .ancillary import ProcessAncillary from .searchjson import" ]
[ "-= h; x -= 2.5; } }''' compute = cpp.generate('compute', 'void(std::string, double, double,", "= z * z + c; ++i; } return max_iter - i; }", "<< \" \"; x += h; } f << \"\\n\"; y -= h;", "sys import copperhead as cpp extra_compile_args = \"'/std:c++14'\" if sys.version.split('[')[1].startswith('MSC') else \"'-std=c++14'\" config", "0.0}; while (std::abs(z) < 2.0 && i < max_iter) { z = z", "filename, double x, double y, double h) { const auto n {std::lround(2.5 /", "+= h; } f << \"\\n\"; y -= h; x -= 2.5; }", "std::ofstream f(filename); for (long yidx {0}; yidx < n; ++yidx) { for (long", "while (std::abs(z) < 2.0 && i < max_iter) { z = z *", "double y, double h) { const auto n {std::lround(2.5 / h)}; std::ofstream f(filename);", "\"'-std=c++14'\" config = { 'extra_compile_args': extra_compile_args } mandelbrot_cpp = r''' #include <cmath> #include", "{0}; std::complex<double> z {0.0, 0.0}; while (std::abs(z) < 2.0 && i < max_iter)", "xidx < n; ++xidx) { f << mandelbrot(std::complex<double>(x, y)) << \" \"; x", "return max_iter - i; } void compute(std::string filename, double x, double y, double", "h; } f << \"\\n\"; y -= h; x -= 2.5; } }'''", "config = { 'extra_compile_args': extra_compile_args } mandelbrot_cpp = r''' #include <cmath> #include <complex>", "- i; } void compute(std::string filename, double x, double y, double h) {", "for (long yidx {0}; yidx < n; ++yidx) { for (long xidx {0};", "const auto n {std::lround(2.5 / h)}; std::ofstream f(filename); for (long yidx {0}; yidx", "std::complex<double> z {0.0, 0.0}; while (std::abs(z) < 2.0 && i < max_iter) {", "< n; ++xidx) { f << mandelbrot(std::complex<double>(x, y)) << \" \"; x +=", "i {0}; std::complex<double> z {0.0, 0.0}; while (std::abs(z) < 2.0 && i <", "r''' #include <cmath> #include <complex> #include <fstream> inline int mandelbrot(const std::complex<double> &c) {", "* z + c; ++i; } return max_iter - i; } void compute(std::string", "h) { const auto n {std::lround(2.5 / h)}; std::ofstream f(filename); 
for (long yidx", "compute = cpp.generate('compute', 'void(std::string, double, double, double)', mandelbrot_cpp, config=config) compute('fractal.dat', -2.0, 1.25, 0.005)", "cpp extra_compile_args = \"'/std:c++14'\" if sys.version.split('[')[1].startswith('MSC') else \"'-std=c++14'\" config = { 'extra_compile_args': extra_compile_args", "&& i < max_iter) { z = z * z + c; ++i;", "y -= h; x -= 2.5; } }''' compute = cpp.generate('compute', 'void(std::string, double,", "\"\\n\"; y -= h; x -= 2.5; } }''' compute = cpp.generate('compute', 'void(std::string,", "mandelbrot_cpp = r''' #include <cmath> #include <complex> #include <fstream> inline int mandelbrot(const std::complex<double>", "compute(std::string filename, double x, double y, double h) { const auto n {std::lround(2.5", "y, double h) { const auto n {std::lround(2.5 / h)}; std::ofstream f(filename); for", "(long yidx {0}; yidx < n; ++yidx) { for (long xidx {0}; xidx", "max_iter) { z = z * z + c; ++i; } return max_iter", "x, double y, double h) { const auto n {std::lround(2.5 / h)}; std::ofstream", "{ z = z * z + c; ++i; } return max_iter -", "i < max_iter) { z = z * z + c; ++i; }", "void compute(std::string filename, double x, double y, double h) { const auto n", "n; ++xidx) { f << mandelbrot(std::complex<double>(x, y)) << \" \"; x += h;", "#include <fstream> inline int mandelbrot(const std::complex<double> &c) { const int max_iter {100}; int", "2.0 && i < max_iter) { z = z * z + c;", "<< \"\\n\"; y -= h; x -= 2.5; } }''' compute = cpp.generate('compute',", "c; ++i; } return max_iter - i; } void compute(std::string filename, double x,", "} f << \"\\n\"; y -= h; x -= 2.5; } }''' compute", "max_iter - i; } void compute(std::string filename, double x, double y, double h)", "int i {0}; std::complex<double> z {0.0, 0.0}; while (std::abs(z) < 2.0 && i", "= r''' #include <cmath> #include <complex> #include <fstream> inline int mandelbrot(const std::complex<double> &c)", "<filename>examples/fractal.py import sys import 
copperhead as cpp extra_compile_args = \"'/std:c++14'\" if sys.version.split('[')[1].startswith('MSC') else", "++xidx) { f << mandelbrot(std::complex<double>(x, y)) << \" \"; x += h; }", "xidx {0}; xidx < n; ++xidx) { f << mandelbrot(std::complex<double>(x, y)) << \"", "extra_compile_args = \"'/std:c++14'\" if sys.version.split('[')[1].startswith('MSC') else \"'-std=c++14'\" config = { 'extra_compile_args': extra_compile_args }", "f << \"\\n\"; y -= h; x -= 2.5; } }''' compute =", "++i; } return max_iter - i; } void compute(std::string filename, double x, double", "n {std::lround(2.5 / h)}; std::ofstream f(filename); for (long yidx {0}; yidx < n;", "{ for (long xidx {0}; xidx < n; ++xidx) { f << mandelbrot(std::complex<double>(x,", "} void compute(std::string filename, double x, double y, double h) { const auto", "as cpp extra_compile_args = \"'/std:c++14'\" if sys.version.split('[')[1].startswith('MSC') else \"'-std=c++14'\" config = { 'extra_compile_args':", "y)) << \" \"; x += h; } f << \"\\n\"; y -=", "import copperhead as cpp extra_compile_args = \"'/std:c++14'\" if sys.version.split('[')[1].startswith('MSC') else \"'-std=c++14'\" config =", "max_iter {100}; int i {0}; std::complex<double> z {0.0, 0.0}; while (std::abs(z) < 2.0", "f << mandelbrot(std::complex<double>(x, y)) << \" \"; x += h; } f <<", "{0}; xidx < n; ++xidx) { f << mandelbrot(std::complex<double>(x, y)) << \" \";", "= \"'/std:c++14'\" if sys.version.split('[')[1].startswith('MSC') else \"'-std=c++14'\" config = { 'extra_compile_args': extra_compile_args } mandelbrot_cpp", "}''' compute = cpp.generate('compute', 'void(std::string, double, double, double)', mandelbrot_cpp, config=config) compute('fractal.dat', -2.0, 1.25,", "h)}; std::ofstream f(filename); for (long yidx {0}; yidx < n; ++yidx) { for", "(std::abs(z) < 2.0 && i < max_iter) { z = z * z", "/ h)}; std::ofstream f(filename); for (long yidx {0}; yidx < n; ++yidx) {", "<complex> #include <fstream> inline int mandelbrot(const 
std::complex<double> &c) { const int max_iter {100};", "int mandelbrot(const std::complex<double> &c) { const int max_iter {100}; int i {0}; std::complex<double>", "&c) { const int max_iter {100}; int i {0}; std::complex<double> z {0.0, 0.0};", "if sys.version.split('[')[1].startswith('MSC') else \"'-std=c++14'\" config = { 'extra_compile_args': extra_compile_args } mandelbrot_cpp = r'''", "} mandelbrot_cpp = r''' #include <cmath> #include <complex> #include <fstream> inline int mandelbrot(const", "for (long xidx {0}; xidx < n; ++xidx) { f << mandelbrot(std::complex<double>(x, y))", "< 2.0 && i < max_iter) { z = z * z +", "yidx < n; ++yidx) { for (long xidx {0}; xidx < n; ++xidx)", "#include <cmath> #include <complex> #include <fstream> inline int mandelbrot(const std::complex<double> &c) { const", "z + c; ++i; } return max_iter - i; } void compute(std::string filename,", "2.5; } }''' compute = cpp.generate('compute', 'void(std::string, double, double, double)', mandelbrot_cpp, config=config) compute('fractal.dat',", "<< mandelbrot(std::complex<double>(x, y)) << \" \"; x += h; } f << \"\\n\";", "<cmath> #include <complex> #include <fstream> inline int mandelbrot(const std::complex<double> &c) { const int", "< n; ++yidx) { for (long xidx {0}; xidx < n; ++xidx) {", "f(filename); for (long yidx {0}; yidx < n; ++yidx) { for (long xidx", "z = z * z + c; ++i; } return max_iter - i;", "auto n {std::lround(2.5 / h)}; std::ofstream f(filename); for (long yidx {0}; yidx <", "<fstream> inline int mandelbrot(const std::complex<double> &c) { const int max_iter {100}; int i", "std::complex<double> &c) { const int max_iter {100}; int i {0}; std::complex<double> z {0.0,", "n; ++yidx) { for (long xidx {0}; xidx < n; ++xidx) { f", "x += h; } f << \"\\n\"; y -= h; x -= 2.5;", "{ 'extra_compile_args': extra_compile_args } mandelbrot_cpp = r''' #include <cmath> #include <complex> #include <fstream>", "{ const int max_iter {100}; int i {0}; std::complex<double> z {0.0, 0.0}; while", 
"mandelbrot(std::complex<double>(x, y)) << \" \"; x += h; } f << \"\\n\"; y", "\"; x += h; } f << \"\\n\"; y -= h; x -=", "x -= 2.5; } }''' compute = cpp.generate('compute', 'void(std::string, double, double, double)', mandelbrot_cpp,", "extra_compile_args } mandelbrot_cpp = r''' #include <cmath> #include <complex> #include <fstream> inline int", "\"'/std:c++14'\" if sys.version.split('[')[1].startswith('MSC') else \"'-std=c++14'\" config = { 'extra_compile_args': extra_compile_args } mandelbrot_cpp =", "< max_iter) { z = z * z + c; ++i; } return", "\" \"; x += h; } f << \"\\n\"; y -= h; x", "{0}; yidx < n; ++yidx) { for (long xidx {0}; xidx < n;", "double x, double y, double h) { const auto n {std::lround(2.5 / h)};", "#include <complex> #include <fstream> inline int mandelbrot(const std::complex<double> &c) { const int max_iter", "h; x -= 2.5; } }''' compute = cpp.generate('compute', 'void(std::string, double, double, double)',", "import sys import copperhead as cpp extra_compile_args = \"'/std:c++14'\" if sys.version.split('[')[1].startswith('MSC') else \"'-std=c++14'\"", "mandelbrot(const std::complex<double> &c) { const int max_iter {100}; int i {0}; std::complex<double> z", "z * z + c; ++i; } return max_iter - i; } void", "++yidx) { for (long xidx {0}; xidx < n; ++xidx) { f <<", "(long xidx {0}; xidx < n; ++xidx) { f << mandelbrot(std::complex<double>(x, y)) <<", "double h) { const auto n {std::lround(2.5 / h)}; std::ofstream f(filename); for (long", "+ c; ++i; } return max_iter - i; } void compute(std::string filename, double", "i; } void compute(std::string filename, double x, double y, double h) { const", "{ f << mandelbrot(std::complex<double>(x, y)) << \" \"; x += h; } f", "z {0.0, 0.0}; while (std::abs(z) < 2.0 && i < max_iter) { z", "{100}; int i {0}; std::complex<double> z {0.0, 0.0}; while (std::abs(z) < 2.0 &&", "int max_iter {100}; int i {0}; std::complex<double> z {0.0, 0.0}; while (std::abs(z) <", "yidx {0}; yidx < n; ++yidx) { for (long xidx 
{0}; xidx <", "'extra_compile_args': extra_compile_args } mandelbrot_cpp = r''' #include <cmath> #include <complex> #include <fstream> inline", "sys.version.split('[')[1].startswith('MSC') else \"'-std=c++14'\" config = { 'extra_compile_args': extra_compile_args } mandelbrot_cpp = r''' #include", "{std::lround(2.5 / h)}; std::ofstream f(filename); for (long yidx {0}; yidx < n; ++yidx)", "{0.0, 0.0}; while (std::abs(z) < 2.0 && i < max_iter) { z =", "copperhead as cpp extra_compile_args = \"'/std:c++14'\" if sys.version.split('[')[1].startswith('MSC') else \"'-std=c++14'\" config = {", "= { 'extra_compile_args': extra_compile_args } mandelbrot_cpp = r''' #include <cmath> #include <complex> #include", "-= 2.5; } }''' compute = cpp.generate('compute', 'void(std::string, double, double, double)', mandelbrot_cpp, config=config)", "const int max_iter {100}; int i {0}; std::complex<double> z {0.0, 0.0}; while (std::abs(z)", "inline int mandelbrot(const std::complex<double> &c) { const int max_iter {100}; int i {0};", "else \"'-std=c++14'\" config = { 'extra_compile_args': extra_compile_args } mandelbrot_cpp = r''' #include <cmath>", "} }''' compute = cpp.generate('compute', 'void(std::string, double, double, double)', mandelbrot_cpp, config=config) compute('fractal.dat', -2.0,", "{ const auto n {std::lround(2.5 / h)}; std::ofstream f(filename); for (long yidx {0};", "} return max_iter - i; } void compute(std::string filename, double x, double y," ]
[ "\"\"\" Subtracts a background that is based on multiple images stored in an", "BackgroundSubtractor: \"\"\" Subtracts background from an image \"\"\" @abstractmethod def subtract_background(self, image: np.ndarray)", "super().__init__() self.background_dset = background_dset # Calculate background image self.calculate_mean_background(background_dset) def subtract_background(self, image: np.ndarray)", "mean of the background images \"\"\" data = self.background_dset[\"CameraData\"] self.mean_background = np.nanmean(np.array(list(data)), axis=0)", "pd.DataFrame) -> None: super().__init__() self.background_dset = background_dset # Calculate background image self.calculate_mean_background(background_dset) def", "\"\"\" @abstractmethod def subtract_background(self, image: np.ndarray) -> np.ndarray: \"\"\" Subtracts background from an", "axis=0) class FittedBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a background based on fit around the image.", "pd class BackgroundSubtractor: \"\"\" Subtracts background from an image \"\"\" @abstractmethod def subtract_background(self,", "the mean of the background images \"\"\" data = self.background_dset[\"CameraData\"] self.mean_background = np.nanmean(np.array(list(data)),", "@abstractmethod def subtract_background(self, image: np.ndarray) -> np.ndarray: \"\"\" Subtracts background from an image", "np.ndarray: \"\"\" Subtracts background from an image \"\"\" ... class AcquiredBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts", "pandas as pd class BackgroundSubtractor: \"\"\" Subtracts background from an image \"\"\" @abstractmethod", "None: \"\"\" Calculates the mean of the background images \"\"\" data = self.background_dset[\"CameraData\"]", "background that is based on multiple images stored in an hdf dataset. 
\"\"\"", "self.calculate_mean_background(background_dset) def subtract_background(self, image: np.ndarray) -> np.ndarray: # Subtract background image_bs = image", "calculate_mean_background(self, df: pd.DataFrame) -> None: \"\"\" Calculates the mean of the background images", "= self.background_dset[\"CameraData\"] self.mean_background = np.nanmean(np.array(list(data)), axis=0) class FittedBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a background based", "stored in an hdf dataset. \"\"\" def __init__(self, background_dset: pd.DataFrame) -> None: super().__init__()", "image \"\"\" ... class AcquiredBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a background that is based on", "as pd class BackgroundSubtractor: \"\"\" Subtracts background from an image \"\"\" @abstractmethod def", "multiple images stored in an hdf dataset. \"\"\" def __init__(self, background_dset: pd.DataFrame) ->", "# Subtract background image_bs = image - self.mean_background # Return background subtracted image", "subtracting background from camera images \"\"\" from abc import ABC, abstractmethod from dataclasses", "np.nanmean(np.array(list(data)), axis=0) class FittedBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a background based on fit around the", "FittedBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a background based on fit around the image. 
\"\"\" #", "data = self.background_dset[\"CameraData\"] self.mean_background = np.nanmean(np.array(list(data)), axis=0) class FittedBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a background", "class AcquiredBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a background that is based on multiple images stored", "np import pandas as pd class BackgroundSubtractor: \"\"\" Subtracts background from an image", "background from an image \"\"\" @abstractmethod def subtract_background(self, image: np.ndarray) -> np.ndarray: \"\"\"", "an image \"\"\" @abstractmethod def subtract_background(self, image: np.ndarray) -> np.ndarray: \"\"\" Subtracts background", "Calculate background image self.calculate_mean_background(background_dset) def subtract_background(self, image: np.ndarray) -> np.ndarray: # Subtract background", "background image_bs = image - self.mean_background # Return background subtracted image return image_bs", "that is based on multiple images stored in an hdf dataset. 
\"\"\" def", "= background_dset # Calculate background image self.calculate_mean_background(background_dset) def subtract_background(self, image: np.ndarray) -> np.ndarray:", "subtracted image return image_bs def calculate_mean_background(self, df: pd.DataFrame) -> None: \"\"\" Calculates the", "the background images \"\"\" data = self.background_dset[\"CameraData\"] self.mean_background = np.nanmean(np.array(list(data)), axis=0) class FittedBackgroundSubtractor(BackgroundSubtractor):", "\"\"\" data = self.background_dset[\"CameraData\"] self.mean_background = np.nanmean(np.array(list(data)), axis=0) class FittedBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a", "def __init__(self, background_dset: pd.DataFrame) -> None: super().__init__() self.background_dset = background_dset # Calculate background", "import numpy as np import pandas as pd class BackgroundSubtractor: \"\"\" Subtracts background", "None: super().__init__() self.background_dset = background_dset # Calculate background image self.calculate_mean_background(background_dset) def subtract_background(self, image:", "image \"\"\" @abstractmethod def subtract_background(self, image: np.ndarray) -> np.ndarray: \"\"\" Subtracts background from", "\"\"\" Subtracts background from an image \"\"\" ... class AcquiredBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a", "background_dset # Calculate background image self.calculate_mean_background(background_dset) def subtract_background(self, image: np.ndarray) -> np.ndarray: #", "-> np.ndarray: # Subtract background image_bs = image - self.mean_background # Return background", "image: np.ndarray) -> np.ndarray: # Subtract background image_bs = image - self.mean_background #", "\"\"\" Subtracts background from an image \"\"\" @abstractmethod def subtract_background(self, image: np.ndarray) ->", "\"\"\" ... 
class AcquiredBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a background that is based on multiple", "AcquiredBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a background that is based on multiple images stored in", "self.background_dset[\"CameraData\"] self.mean_background = np.nanmean(np.array(list(data)), axis=0) class FittedBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a background based on", "np.ndarray) -> np.ndarray: # Subtract background image_bs = image - self.mean_background # Return", "camera images \"\"\" from abc import ABC, abstractmethod from dataclasses import dataclass import", "Subtracts a background that is based on multiple images stored in an hdf", "background subtracted image return image_bs def calculate_mean_background(self, df: pd.DataFrame) -> None: \"\"\" Calculates", "self.mean_background = np.nanmean(np.array(list(data)), axis=0) class FittedBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a background based on fit", "... class AcquiredBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a background that is based on multiple images", "pd.DataFrame) -> None: \"\"\" Calculates the mean of the background images \"\"\" data", "image - self.mean_background # Return background subtracted image return image_bs def calculate_mean_background(self, df:", "background images \"\"\" data = self.background_dset[\"CameraData\"] self.mean_background = np.nanmean(np.array(list(data)), axis=0) class FittedBackgroundSubtractor(BackgroundSubtractor): \"\"\"", "\"\"\" Objects used for subtracting background from camera images \"\"\" from abc import", "as np import pandas as pd class BackgroundSubtractor: \"\"\" Subtracts background from an", "image: np.ndarray) -> np.ndarray: \"\"\" Subtracts background from an image \"\"\" ... class", "on multiple images stored in an hdf dataset. 
\"\"\" def __init__(self, background_dset: pd.DataFrame)", "image return image_bs def calculate_mean_background(self, df: pd.DataFrame) -> None: \"\"\" Calculates the mean", "return image_bs def calculate_mean_background(self, df: pd.DataFrame) -> None: \"\"\" Calculates the mean of", "def subtract_background(self, image: np.ndarray) -> np.ndarray: # Subtract background image_bs = image -", "from dataclasses import dataclass import numpy as np import pandas as pd class", "dataclass import numpy as np import pandas as pd class BackgroundSubtractor: \"\"\" Subtracts", "\"\"\" Calculates the mean of the background images \"\"\" data = self.background_dset[\"CameraData\"] self.mean_background", "Objects used for subtracting background from camera images \"\"\" from abc import ABC,", "# Calculate background image self.calculate_mean_background(background_dset) def subtract_background(self, image: np.ndarray) -> np.ndarray: # Subtract", "Calculates the mean of the background images \"\"\" data = self.background_dset[\"CameraData\"] self.mean_background =", "from abc import ABC, abstractmethod from dataclasses import dataclass import numpy as np", "= np.nanmean(np.array(list(data)), axis=0) class FittedBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a background based on fit around", "an hdf dataset. \"\"\" def __init__(self, background_dset: pd.DataFrame) -> None: super().__init__() self.background_dset =", "- self.mean_background # Return background subtracted image return image_bs def calculate_mean_background(self, df: pd.DataFrame)", "image_bs = image - self.mean_background # Return background subtracted image return image_bs def", "Subtract background image_bs = image - self.mean_background # Return background subtracted image return", "from an image \"\"\" ... class AcquiredBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a background that is", "np.ndarray) -> np.ndarray: \"\"\" Subtracts background from an image \"\"\" ... 
class AcquiredBackgroundSubtractor(BackgroundSubtractor):", "Subtracts background from an image \"\"\" ... class AcquiredBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a background", "\"\"\" from abc import ABC, abstractmethod from dataclasses import dataclass import numpy as", "from camera images \"\"\" from abc import ABC, abstractmethod from dataclasses import dataclass", "self.mean_background # Return background subtracted image return image_bs def calculate_mean_background(self, df: pd.DataFrame) ->", "background from camera images \"\"\" from abc import ABC, abstractmethod from dataclasses import", "ABC, abstractmethod from dataclasses import dataclass import numpy as np import pandas as", "for subtracting background from camera images \"\"\" from abc import ABC, abstractmethod from", "import pandas as pd class BackgroundSubtractor: \"\"\" Subtracts background from an image \"\"\"", "image_bs def calculate_mean_background(self, df: pd.DataFrame) -> None: \"\"\" Calculates the mean of the", "is based on multiple images stored in an hdf dataset. \"\"\" def __init__(self,", "df: pd.DataFrame) -> None: \"\"\" Calculates the mean of the background images \"\"\"", "-> None: super().__init__() self.background_dset = background_dset # Calculate background image self.calculate_mean_background(background_dset) def subtract_background(self,", "= image - self.mean_background # Return background subtracted image return image_bs def calculate_mean_background(self,", "import dataclass import numpy as np import pandas as pd class BackgroundSubtractor: \"\"\"", "\"\"\" Subtracts a background based on fit around the image. \"\"\" # todo", "images stored in an hdf dataset. 
\"\"\" def __init__(self, background_dset: pd.DataFrame) -> None:", "numpy as np import pandas as pd class BackgroundSubtractor: \"\"\" Subtracts background from", "self.background_dset = background_dset # Calculate background image self.calculate_mean_background(background_dset) def subtract_background(self, image: np.ndarray) ->", "abc import ABC, abstractmethod from dataclasses import dataclass import numpy as np import", "import ABC, abstractmethod from dataclasses import dataclass import numpy as np import pandas", "image self.calculate_mean_background(background_dset) def subtract_background(self, image: np.ndarray) -> np.ndarray: # Subtract background image_bs =", "subtract_background(self, image: np.ndarray) -> np.ndarray: # Subtract background image_bs = image - self.mean_background", "images \"\"\" data = self.background_dset[\"CameraData\"] self.mean_background = np.nanmean(np.array(list(data)), axis=0) class FittedBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts", "def calculate_mean_background(self, df: pd.DataFrame) -> None: \"\"\" Calculates the mean of the background", "images \"\"\" from abc import ABC, abstractmethod from dataclasses import dataclass import numpy", "class BackgroundSubtractor: \"\"\" Subtracts background from an image \"\"\" @abstractmethod def subtract_background(self, image:", "__init__(self, background_dset: pd.DataFrame) -> None: super().__init__() self.background_dset = background_dset # Calculate background image", "Return background subtracted image return image_bs def calculate_mean_background(self, df: pd.DataFrame) -> None: \"\"\"", "-> None: \"\"\" Calculates the mean of the background images \"\"\" data =", "Subtracts background from an image \"\"\" @abstractmethod def subtract_background(self, image: np.ndarray) -> np.ndarray:", "background from an image \"\"\" ... 
class AcquiredBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a background that", "def subtract_background(self, image: np.ndarray) -> np.ndarray: \"\"\" Subtracts background from an image \"\"\"", "a background that is based on multiple images stored in an hdf dataset.", "based on multiple images stored in an hdf dataset. \"\"\" def __init__(self, background_dset:", "-> np.ndarray: \"\"\" Subtracts background from an image \"\"\" ... class AcquiredBackgroundSubtractor(BackgroundSubtractor): \"\"\"", "dataclasses import dataclass import numpy as np import pandas as pd class BackgroundSubtractor:", "class FittedBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a background based on fit around the image. \"\"\"", "in an hdf dataset. \"\"\" def __init__(self, background_dset: pd.DataFrame) -> None: super().__init__() self.background_dset", "dataset. \"\"\" def __init__(self, background_dset: pd.DataFrame) -> None: super().__init__() self.background_dset = background_dset #", "from an image \"\"\" @abstractmethod def subtract_background(self, image: np.ndarray) -> np.ndarray: \"\"\" Subtracts", "subtract_background(self, image: np.ndarray) -> np.ndarray: \"\"\" Subtracts background from an image \"\"\" ...", "abstractmethod from dataclasses import dataclass import numpy as np import pandas as pd", "background image self.calculate_mean_background(background_dset) def subtract_background(self, image: np.ndarray) -> np.ndarray: # Subtract background image_bs", "used for subtracting background from camera images \"\"\" from abc import ABC, abstractmethod", "hdf dataset. \"\"\" def __init__(self, background_dset: pd.DataFrame) -> None: super().__init__() self.background_dset = background_dset", "\"\"\" def __init__(self, background_dset: pd.DataFrame) -> None: super().__init__() self.background_dset = background_dset # Calculate", "an image \"\"\" ... 
class AcquiredBackgroundSubtractor(BackgroundSubtractor): \"\"\" Subtracts a background that is based", "np.ndarray: # Subtract background image_bs = image - self.mean_background # Return background subtracted", "of the background images \"\"\" data = self.background_dset[\"CameraData\"] self.mean_background = np.nanmean(np.array(list(data)), axis=0) class", "background_dset: pd.DataFrame) -> None: super().__init__() self.background_dset = background_dset # Calculate background image self.calculate_mean_background(background_dset)", "# Return background subtracted image return image_bs def calculate_mean_background(self, df: pd.DataFrame) -> None:" ]
[ "whether hold should be turned on or off (default: True) Returns: None '''", "be saved. If it is False, then the file is only saved upon", "= cursession() if not document: document = curdoc() if session: return session.store_document(curdoc()) else:", "Returns: plot : the current default plot (or None) ''' return curdoc().curplot() def", "show(). mode (str, optional) : how to inlude BokehJS (default: \"inline\") **mode** can", "flask import request doc = request.bokeh_server_document logger.debug(\"returning config from flask request\") return doc", "controller.open(session.object_link(curdoc()._plotcontext)) elif filename: save(filename) controller.open(\"file://\" + os.path.abspath(filename), new=new_param) def save(filename=None, resources=None): \"\"\" Updates", "one. Returns: session : the current default session object (or None) ''' return", ": the current default document object. ''' try: \"\"\"This is used when we", "warnings.warn(\"No current plot to save. Use renderer functions (circle, rect, etc.) to create", "or 'absolute(-dev)'. In the 'relative(-dev)' case, **root_dir** can be specified to indicate the", "present resources (Resources, optional) : BokehJS resource config to use if `resources` is", "you still want the API to work but you don't want to use", "return session.store_document(curdoc()) else: warnings.warn(\"push() called but no session was supplied and output_server(...) 
was", "_doc_wrap(gf.annulus) arc = _doc_wrap(gf.arc) asterisk = _doc_wrap(gf.asterisk) bezier = _doc_wrap(gf.bezier) circle = _doc_wrap(gf.circle)", "Walk the plot_arrangement and remove them from the plotcontext, # so they don't", "_alpha_fields = set([\"alpha\", \"fill_alpha\", \"line_alpha\"]) def scatter(*args, **kwargs): \"\"\" Creates a scatter plot", "\"\"\" output_server(docname, session=Cloud()) def output_notebook(url=None, docname=None, session=None, name=None): if session or url or", "(str, optional) : if name is None, use the server URL as the", "create a current plot (see http://bokeh.pydata.org/index.html)\") return if notebook and session: import IPython.core.displaypub", "default document is pushed Returns: None \"\"\" if not session: session = cursession()", "object or splattable list of y-axis objects on the current plot \"\"\" p", "marker_type, defaults to \"circle\" color (color value, optional): shorthand to set both fill", "`filename` is None, the current output_file(...) filename is used if present resources (Resources,", "from .resources import Resources from .session import Cloud, DEFAULT_SERVER_URL, Session logger = logging.getLogger(__name__)", "= kwargs.get(\"marker\", \"circle\") # TODO: How to handle this? Just call curplot()? if", "_default_notebook # Map our string argument to the webbrowser.open argument new_param = {'tab':", "def scatter(*args, **kwargs): \"\"\" Creates a scatter plot of the given x and", "warnings.warn(\"push() called but no session was supplied and output_server(...) was never called, nothing", "causes the file to be saved. 
If it is False, then the file", "= _doc_wrap(gf.ray) rect = _doc_wrap(gf.rect) segment = _doc_wrap(gf.segment) square = _doc_wrap(gf.square) square_cross =", "datasource = _handle_1d_data_args(args, datasource=ds) kwargs[\"source\"] = datasource markertype = kwargs.get(\"marker\", \"circle\") # TODO:", "None _default_file = None _default_notebook = None def curdoc(): ''' Return the current", "def output_notebook(url=None, docname=None, session=None, name=None): if session or url or name: if docname", "replaces any existing default Server session \"\"\" global _default_session if url == \"default\":", ":ref:`userguide_fill_properties` are also accepted as keyword parameters. Examples: >>> scatter([1,2,3],[4,5,6], fill_color=\"red\") >>> scatter(\"data1\",", "on the current plot \"\"\" return _list_attr_splat(xaxis() + yaxis()) def legend(): \"\"\" Get", "module documentation in the standard lib for more details.) new (str, optional) :", "= file_html(curdoc(), resources, _default_file['title']) with open(filename, \"w\") as f: f.write(html) def push(session=None, document=None):", "True def output_file(filename, title=\"Bokeh Plot\", autosave=True, mode=\"inline\", root_dir=None): \"\"\" Outputs to a static", "to curdoc().hold(...) Args: value (bool, optional) : whether hold should be turned on", "document to push if `document` is None, the current default document is pushed", "from . import browserlib from . 
import _glyph_functions as gf from .document import", "session \"\"\" global _default_session if url == \"default\": url = DEFAULT_SERVER_URL if name", "None: return None legends = [obj for obj in p.renderers if isinstance(obj, Legend)]", "don't want to use the global module level document \"\"\" from flask import", "in p.renderers if isinstance(obj, Legend)] return _list_attr_splat(legends) def xgrid(): \"\"\" Get the current", "session.use_doc(docname) session.load_document(curdoc()) def output_cloud(docname): \"\"\" Cause plotting commands to automatically persist plots to", "Y) Two 1D arrays or iterables (XNAME, YNAME) Two bokeh DataSource/ColumnsRef marker (str,", "(str) : name of document to push on Bokeh server An existing documents", "load_notebook load_notebook() global _default_notebook _default_notebook = True def output_file(filename, title=\"Bokeh Plot\", autosave=True, mode=\"inline\",", "Returns: None \"\"\" if filename is None and _default_file: filename = _default_file['filename'] if", "document: document = curdoc() if session: return session.store_document(curdoc()) else: warnings.warn(\"push() called but no", "Returns grid object or splattable list of grid objects on the current plot", ": browser to show with (default: None) For systems that support it, the", "to create a current plot (see http://bokeh.pydata.org/index.html)\") return html = file_html(curdoc(), resources, _default_file['title'])", "GridPlot(children=plot_arrangement) if name: grid._id = name # Walk the plot_arrangement and remove them", "to show. Use renderer functions (circle, rect, etc.) 
to create a current plot", "isinstance(obj, Legend)] return _list_attr_splat(legends) def xgrid(): \"\"\" Get the current `x` :class:`grid <bokeh.objects.Grid>`", "Returns: Returns axis object or splattable list of axis objects on the current", "and _default_file['autosave']: save() return retval wrapper.__doc__ += \"\\nThis is a convenience function that", "object(s) Returns: Returns legend object or splattable list of legend objects on the", "marker types.\" % markertype) return _marker_types[markertype](*args, **kwargs) def gridplot(plot_arrangement, name=None): \"\"\" Generate a", "quad = _doc_wrap(gf.quad) quadratic = _doc_wrap(gf.quadratic) ray = _doc_wrap(gf.ray) rect = _doc_wrap(gf.rect) segment", "of document to push on Bokeh server An existing documents with the same", "new (str, optional) : new file output mode (default: \"tab\") For file-based output,", "''' return _default_session def hold(value=True): ''' Set or clear the plot hold status", "beginning of an interactive session or the top of a script. .. note::", "from .embed import notebook_div, file_html, autoload_server from .objects import Axis, ColumnDataSource, Glyph, Grid,", "of a script. \"\"\" global _default_file _default_file = { 'filename' : filename, 'resources'", "Get the current `x` :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns legend object or splattable", "to be saved. If it is False, then the file is only saved", "ColumnDataSource, Glyph, Grid, GridPlot, Legend from .palettes import brewer from .plotting_helpers import (", "will replaces any existing default Server session \"\"\" output_server(docname, session=Cloud()) def output_notebook(url=None, docname=None,", "document. Returns: doc : the current default document object. 
''' try: \"\"\"This is", "<bokeh.objects.Grid>` object(s) Returns: Returns grid object or splattable list of grid objects on", "for obj in p.renderers if isinstance(obj, Grid) and obj.dimension==1] return _list_attr_splat(grid) def grid():", "get_default_color() if not len(_alpha_fields.intersection(set(kwargs.keys()))): kwargs['alpha'] = get_default_alpha() if markertype not in _marker_types: raise", "`y` axis object(s) Returns: Returns y-axis object or splattable list of y-axis objects", "called but no resources was supplied and output_file(...) was never called, nothing saved\")", "Activate a new figure for plotting. All subsequent plotting operations will affect the", "a current plot (see http://bokeh.pydata.org/index.html)\") return html = file_html(curdoc(), resources, _default_file['title']) with open(filename,", "import Axis, ColumnDataSource, Glyph, Grid, GridPlot, Legend from .palettes import brewer from .plotting_helpers", "computed. .. note:: Generally, this should be called at the beginning of an", "_default_file['autosave']: save() return retval wrapper.__doc__ += \"\\nThis is a convenience function that acts", "def cursession(): ''' Return the current session, if there is one. Returns: session", ": Resources(mode=mode, root_dir=root_dir, minified=False), 'autosave' : autosave, 'title' : title, } if os.path.isfile(filename):", "session object (or None) ''' return _default_session def hold(value=True): ''' Set or clear", "plots to the Bokeh cloud server. Args: docname (str) : name of document", "of the Bokeh server (default: \"default\") if url is \"default\" use session.DEFAULT_SERVER_URL name", "equivalent to curdoc().%s(...)\" % func.__name__ return wrapper annular_wedge = _doc_wrap(gf.annular_wedge) annulus = _doc_wrap(gf.annulus)", "to create a current plot (see http://bokeh.pydata.org/index.html)\") return if notebook and session: import", "the current default document object. 
''' try: \"\"\"This is used when we need", "Map our string argument to the webbrowser.open argument new_param = {'tab': 2, 'window':", "they don't show up twice subplots = itertools.chain.from_iterable(plot_arrangement) curdoc().get_context().children = list(set(curdoc().get_context().children) - set(subplots))", "for example) in this case you still want the API to work but", "**kwargs) def gridplot(plot_arrangement, name=None): \"\"\" Generate a plot that arranges several subplots into", ".embed import notebook_div, file_html, autoload_server from .objects import Axis, ColumnDataSource, Glyph, Grid, GridPlot,", "cursession(): ''' Return the current session, if there is one. Returns: session :", "plot (see http://bokeh.pydata.org/index.html)\") return html = file_html(curdoc(), resources, _default_file['title']) with open(filename, \"w\") as", "and _default_file['autosave']: save() return grid def xaxis(): \"\"\" Get the current axis objects", "\"\"\" Get the current :class:`legend <bokeh.objects.Legend>` object(s) Returns: Returns legend object or splattable", "None) if `sessiokn` is None, the current output_server(...) session is used if present", "work but you don't want to use the global module level document \"\"\"", "functools import wraps import itertools import time import logging import os import uuid", "''' Return the current default plot object. Returns: plot : the current default", "} if os.path.isfile(filename): print(\"Session output file '%s' already exists, will be overwritten.\" %", "import logging import os import uuid import warnings from . 
import browserlib from", "{'text/html': notebook_div(plot)}) elif session: push() if url: controller.open(url, new=new_param) else: controller.open(session.object_link(curdoc()._plotcontext)) elif filename:", "nothing saved\") return if not resources: warnings.warn(\"save() called but no resources was supplied", "current axis objects Returns: Returns axis object or splattable list of axis objects", "the :ref:`userguide_line_properties` and :ref:`userguide_fill_properties` are also accepted as keyword parameters. Examples: >>> scatter([1,2,3],[4,5,6],", "for obj in p.renderers if isinstance(obj, Axis) and obj.dimension==1] return _list_attr_splat(axis) def axis():", "_doc_wrap(gf.circle_cross) circle_x = _doc_wrap(gf.circle_x) cross = _doc_wrap(gf.cross) diamond = _doc_wrap(gf.diamond) diamond_cross = _doc_wrap(gf.diamond_cross)", "[obj for obj in p.renderers if isinstance(obj, Axis) and obj.dimension==1] return _list_attr_splat(axis) def", "splattable list of y-axis objects on the current plot \"\"\" p = curplot()", "not document: document = curdoc() if session: return session.store_document(curdoc()) else: warnings.warn(\"push() called but", "_list_attr_splat(axis) def axis(): \"\"\" Get the current `x` axis object(s) Returns: Returns x-axis", "p.renderers if isinstance(obj, Grid) and obj.dimension==1] return _list_attr_splat(grid) def grid(): \"\"\" Get the", "= _doc_wrap(gf.multi_line) oval = _doc_wrap(gf.oval) patch = _doc_wrap(gf.patch) patches = _doc_wrap(gf.patches) quad =", "is a convenience function that acts on the current document, and is equivalent", "plotting commands to automatically persist plots to the Bokeh cloud server. Args: docname", "plot: warnings.warn(\"No current plot to show. Use renderer functions (circle, rect, etc.) 
to", "push(session=None, document=None): \"\"\" Updates the server with the data for the current document.", "inverted_triangle, \"square\": square, \"square_x\": square_x, \"square_cross\": square_cross, \"triangle\": triangle, \"x\": x, \"*\": asterisk,", "splattable list of x-axis objects on the current plot \"\"\" return _list_attr_splat(xaxis() +", "\"x\": x, \"*\": asterisk, \"+\": cross, \"o\": circle, \"ox\": circle_x, \"o+\": circle_cross, }", "= None _default_notebook = None def curdoc(): ''' Return the current document. Returns:", "plot \"\"\" return _list_attr_splat(xgrid() + ygrid()) def load_object(obj): \"\"\"updates object from the server", "(circle, rect, etc.) to create a current plot (see http://bokeh.pydata.org/index.html)\") return html =", "attrs if _default_session: push() if _default_file and _default_file['autosave']: save() return grid def xaxis():", "output_notebook(url=None, docname=None, session=None, name=None): if session or url or name: if docname is", "= _doc_wrap(gf.circle_x) cross = _doc_wrap(gf.cross) diamond = _doc_wrap(gf.diamond) diamond_cross = _doc_wrap(gf.diamond_cross) image =", "or splattable list of legend objects on the current plot \"\"\" p =", "rect, etc.) to create a current plot (see http://bokeh.pydata.org/index.html)\") return html = file_html(curdoc(),", "cell (IPython notebook). Args: browser (str, optional) : browser to show with (default:", "None _default_notebook = None def curdoc(): ''' Return the current document. 
Returns: doc", "\"default\" use session.DEFAULT_SERVER_URL name (str, optional) : if name is None, use the", "cross, \"o\": circle, \"ox\": circle_x, \"o+\": circle_cross, } def markers(): \"\"\" Prints a", "Grid) and obj.dimension==1] return _list_attr_splat(grid) def grid(): \"\"\" Get the current :class:`grid <bokeh.objects.Grid>`", "(str, optional) : browser to show with (default: None) For systems that support", "axis(): \"\"\" Get the current `x` axis object(s) Returns: Returns x-axis object or", "list of y-grid objects on the current plot \"\"\" p = curplot() if", "circle_cross = _doc_wrap(gf.circle_cross) circle_x = _doc_wrap(gf.circle_x) cross = _doc_wrap(gf.cross) diamond = _doc_wrap(gf.diamond) diamond_cross", "(str, optianal) : URL of the Bokeh server (default: \"default\") if url is", "= _doc_wrap(gf.cross) diamond = _doc_wrap(gf.diamond) diamond_cross = _doc_wrap(gf.diamond_cross) image = _doc_wrap(gf.image) image_rgba =", "_default_document = Document() _default_session = None _default_file = None _default_notebook = None def", "snippet}) elif notebook: import IPython.core.displaypub as displaypub displaypub.publish_display_data('bokeh', {'text/html': notebook_div(plot)}) elif session: push()", "cloud server. Args: docname (str) : name of document to push on Bokeh", "square_cross, \"triangle\": triangle, \"x\": x, \"*\": asterisk, \"+\": cross, \"o\": circle, \"ox\": circle_x,", "given filename. Args: filename (str, optional) : filename to save document under (default:", "`plot_arrangement` can be nested, e.g [[p1, p2], [p3, p4]] Returns: grid_plot: the current", "of several forms: (X, Y) Two 1D arrays or iterables (XNAME, YNAME) Two", "global module level document \"\"\" from flask import request doc = request.bokeh_server_document logger.debug(\"returning", "with the same name will be overwritten. .. 
note:: Generally, this should be", "show(browser=None, new=\"tab\", url=None): \"\"\" 'shows' the current plot, by auto-raising the window or", "\"diamond\": diamond, \"diamond_cross\": diamond_cross, \"inverted_triangle\": inverted_triangle, \"square\": square, \"square_x\": square_x, \"square_cross\": square_cross, \"triangle\":", "save(filename) controller.open(\"file://\" + os.path.abspath(filename), new=new_param) def save(filename=None, resources=None): \"\"\" Updates the file with", ": whether to automatically save (default: True) If **autosave** is True, then every", "import itertools import time import logging import os import uuid import warnings from", "autosave, 'title' : title, } if os.path.isfile(filename): print(\"Session output file '%s' already exists,", "time import logging import os import uuid import warnings from . import browserlib", "# Map our string argument to the webbrowser.open argument new_param = {'tab': 2,", "= _doc_wrap(gf.rect) segment = _doc_wrap(gf.segment) square = _doc_wrap(gf.square) square_cross = _doc_wrap(gf.square_cross) square_x =", "output_server(...) session is used if present document (Document, optional) : BokehJS document to", "and is equivalent to curdoc().hold(...) Args: value (bool, optional) : whether hold should", "this causes the file to be saved. If it is False, then the", "list of grid objects on the current plot \"\"\" return _list_attr_splat(xgrid() + ygrid())", "on the current document, and is equivalent to curdoc().hold(...) Args: value (bool, optional)", "return if not resources: warnings.warn(\"save() called but no resources was supplied and output_file(...)", "markertype) return _marker_types[markertype](*args, **kwargs) def gridplot(plot_arrangement, name=None): \"\"\" Generate a plot that arranges", "= _doc_wrap(gf.text) triangle = _doc_wrap(gf.triangle) wedge = _doc_wrap(gf.wedge) x = _doc_wrap(gf.x) _marker_types =", "Updates the server with the data for the current document. 
Args: session (Sesion,", "request context. (Applets do this for example) in this case you still want", "_doc_wrap(gf.image_url) inverted_triangle = _doc_wrap(gf.inverted_triangle) line = _doc_wrap(gf.line) multi_line = _doc_wrap(gf.multi_line) oval = _doc_wrap(gf.oval)", "grid # TODO (bev) don't use private attrs if _default_session: push() if _default_file", "in p.renderers if isinstance(obj, Grid) and obj.dimension==0] return _list_attr_splat(grid) def ygrid(): \"\"\" Get", "import request doc = request.bokeh_server_document logger.debug(\"returning config from flask request\") return doc except", "logger = logging.getLogger(__name__) _default_document = Document() _default_session = None _default_file = None _default_notebook", "to use the global module level document \"\"\" from flask import request doc", "\"\"\" Get the current `y` :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns y-grid object or", "**kwargs) if cursession() and curdoc()._autostore: push() if _default_file and _default_file['autosave']: save() return retval", "\"inverted_triangle\": inverted_triangle, \"square\": square, \"square_x\": square_x, \"square_cross\": square_cross, \"triangle\": triangle, \"x\": x, \"*\":", "= _doc_wrap(gf.triangle) wedge = _doc_wrap(gf.wedge) x = _doc_wrap(gf.x) _marker_types = { \"asterisk\": asterisk,", "default resource config is used Returns: None \"\"\" if filename is None and", "to push on Bokeh server An existing documents with the same name will", "be specified to indicate the base directory from which the path to the", "be supplied. Returns: None .. note:: Generally, this should be called at the", "current plot (see http://bokeh.pydata.org/index.html)\") return if notebook and session: import IPython.core.displaypub as displaypub", "saved\") return if not curplot(): warnings.warn(\"No current plot to save. Use renderer functions", "uuid import warnings from . import browserlib from . import _glyph_functions as gf", "name=name) else: from . 
import load_notebook load_notebook() global _default_notebook _default_notebook = True def", "and curdoc()._autostore: push() if _default_file and _default_file['autosave']: save() return retval wrapper.__doc__ += \"\\nThis", "not plot: warnings.warn(\"No current plot to show. Use renderer functions (circle, rect, etc.)", "the data for the current document. If a filename is supplied, or output_file(...)", "This is a convenience function that acts on the current document, and is", "plotting API from within the server, within a request context. (Applets do this", "list of valid marker types.\" % markertype) return _marker_types[markertype](*args, **kwargs) def gridplot(plot_arrangement, name=None):", "from . import _glyph_functions as gf from .document import Document from .embed import", "valid marker_type, defaults to \"circle\" color (color value, optional): shorthand to set both", "document, and is equivalent to curdoc().%s(...)\" % func.__name__ return wrapper annular_wedge = _doc_wrap(gf.annular_wedge)", "from which the path to the various static files should be computed. ..", "and is equivalent to curdoc().%s(...)\" % func.__name__ return wrapper annular_wedge = _doc_wrap(gf.annular_wedge) annulus", "return doc except (ImportError, RuntimeError, AttributeError): return _default_document def curplot(): ''' Return the", "`x` :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns legend object or splattable list of legend", "ray = _doc_wrap(gf.ray) rect = _doc_wrap(gf.rect) segment = _doc_wrap(gf.segment) square = _doc_wrap(gf.square) square_cross", "Legend)] return _list_attr_splat(legends) def xgrid(): \"\"\" Get the current `x` :class:`grid <bokeh.objects.Grid>` object(s)", "default session. Args: docname (str) : name of document to push on Bokeh", "_doc_wrap(gf.text) triangle = _doc_wrap(gf.triangle) wedge = _doc_wrap(gf.wedge) x = _doc_wrap(gf.x) _marker_types = {", "that arranges several subplots into a grid. 
Args: plot_arrangement (list[:class:`Plot <bokeh.objects.Plot>`]) : plots", "grid = [obj for obj in p.renderers if isinstance(obj, Grid) and obj.dimension==0] return", "= [obj for obj in p.renderers if isinstance(obj, Grid) and obj.dimension==0] return _list_attr_splat(grid)", "save() return retval wrapper.__doc__ += \"\\nThis is a convenience function that acts on", "(str, optional) : new file output mode (default: \"tab\") For file-based output, opens", "the default session url (str, optianal) : URL of the Bokeh server (default:", "''' Activate a new figure for plotting. All subsequent plotting operations will affect", "= _doc_wrap(gf.bezier) circle = _doc_wrap(gf.circle) circle_cross = _doc_wrap(gf.circle_cross) circle_x = _doc_wrap(gf.circle_x) cross =", "need to call the plotting API from within the server, within a request", "splattable list of legend objects on the current plot \"\"\" p = curplot()", "def show(browser=None, new=\"tab\", url=None): \"\"\" 'shows' the current plot, by auto-raising the window", "functions (circle, rect, etc.) to create a current plot (see http://bokeh.pydata.org/index.html)\") return if", "itertools.chain.from_iterable(plot_arrangement) curdoc().get_context().children = list(set(curdoc().get_context().children) - set(subplots)) curdoc().add(grid) curdoc()._current_plot = grid # TODO (bev)", "plotcontext, # so they don't show up twice subplots = itertools.chain.from_iterable(plot_arrangement) curdoc().get_context().children =", "None) If session is None, use the default session url (str, optianal) :", "None, use the server URL as the name Additional keyword arguments like **username**,", "are also accepted as keyword parameters. Examples: >>> scatter([1,2,3],[4,5,6], fill_color=\"red\") >>> scatter(\"data1\", \"data2\",", "the plot to the given filename. Args: filename (str, optional) : filename to", "\"\"\"This is used when we need to call the plotting API from within", "'relative(-dev)' or 'absolute(-dev)'. 
In the 'relative(-dev)' case, **root_dir** can be specified to indicate", "obj.dimension==1] return _list_attr_splat(grid) def grid(): \"\"\" Get the current :class:`grid <bokeh.objects.Grid>` object(s) Returns:", "url (str, optianal) : URL of the Bokeh server (default: \"default\") if url", "curplot() if p is None: return None grid = [obj for obj in", "any existing default Server session \"\"\" output_server(docname, session=Cloud()) def output_notebook(url=None, docname=None, session=None, name=None):", "existing documents with the same name will be overwritten. session (Session, optional) :", "string argument to the webbrowser.open argument new_param = {'tab': 2, 'window': 1}[new] controller", "\"triangle\": triangle, \"x\": x, \"*\": asterisk, \"+\": cross, \"o\": circle, \"ox\": circle_x, \"o+\":", "session = cursession() notebook = _default_notebook # Map our string argument to the", "fill and line color All the :ref:`userguide_line_properties` and :ref:`userguide_fill_properties` are also accepted as", "Args: session (Sesion, optional) : filename to save document under (default: None) if", "\"\"\" Creates a scatter plot of the given x and y items. Args:", "(default: True) Returns: None ''' curdoc().hold(value) def figure(**kwargs): ''' Activate a new figure", "session.store_document(curdoc()) else: warnings.warn(\"push() called but no session was supplied and output_server(...) 
was never", "session : the current default session object (or None) ''' return _default_session def", "= grid # TODO (bev) don't use private attrs if _default_session: push() if", "is used Returns: None \"\"\" if filename is None and _default_file: filename =", "new_param = {'tab': 2, 'window': 1}[new] controller = browserlib.get_browser_controller(browser=browser) plot = curplot() if", "on Bokeh server An existing documents with the same name will be overwritten.", "None grid = [obj for obj in p.renderers if isinstance(obj, Grid) and obj.dimension==0]", "if name is None, use the server URL as the name Additional keyword", "snippet = autoload_server(plot, cursession()) displaypub.publish_display_data('bokeh', {'text/html': snippet}) elif notebook: import IPython.core.displaypub as displaypub", "rect, etc.) to create a current plot (see http://bokeh.pydata.org/index.html)\") return if notebook and", "the webbrowser module documentation in the standard lib for more details.) new (str,", "annulus = _doc_wrap(gf.annulus) arc = _doc_wrap(gf.arc) asterisk = _doc_wrap(gf.asterisk) bezier = _doc_wrap(gf.bezier) circle", "this case you still want the API to work but you don't want", "raises the browser window showing the current output file. If **new** is 'tab',", "off (default: True) Returns: None ''' curdoc().hold(value) def figure(**kwargs): ''' Activate a new", "accepts all plot style keyword parameters. Returns: None ''' curdoc().figure(**kwargs) def output_server(docname, session=None,", "_list_attr_splat(legends) def xgrid(): \"\"\" Get the current `x` :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns", "Returns: Returns grid object or splattable list of grid objects on the current", "(or None) ''' return curdoc().curplot() def cursession(): ''' Return the current session, if", "None) if `filename` is None, the current output_file(...) 
filename is used if present", "For systems that support it, the **browser** argument allows specifying which browser to", "square, \"square_x\": square_x, \"square_cross\": square_cross, \"triangle\": triangle, \"x\": x, \"*\": asterisk, \"+\": cross,", "elif notebook: import IPython.core.displaypub as displaypub displaypub.publish_display_data('bokeh', {'text/html': notebook_div(plot)}) elif session: push() if", "Args: value (bool, optional) : whether hold should be turned on or off", "Returns: Returns x-axis object or splattable list of x-axis objects on the current", "_doc_wrap(gf.image_rgba) image_url = _doc_wrap(gf.image_url) inverted_triangle = _doc_wrap(gf.inverted_triangle) line = _doc_wrap(gf.line) multi_line = _doc_wrap(gf.multi_line)", "plotting commands to automatically persist plots to a Bokeh server. Can use explicitly", "kwargs['color'] = get_default_color() if not len(_alpha_fields.intersection(set(kwargs.keys()))): kwargs['alpha'] = get_default_alpha() if markertype not in", "filename (str, optional) : filename to save document under (default: None) if `filename`", "Session logger = logging.getLogger(__name__) _default_document = Document() _default_session = None _default_file = None", "`resources` is None, the current default resource config is used Returns: None \"\"\"", "session or the top of a script. \"\"\" global _default_file _default_file = {", "is equivalent to curdoc().%s(...)\" % func.__name__ return wrapper annular_wedge = _doc_wrap(gf.annular_wedge) annulus =", "AttributeError): return _default_document def curplot(): ''' Return the current default plot object. Returns:", "url or name: if docname is None: docname = \"IPython Session at %s\"", "the beginning of an interactive session or the top of a script. 
\"\"\"", "by auto-raising the window or tab displaying the current plot (for file/server output", "support it, the **browser** argument allows specifying which browser to display in, e.g.", "grid(): \"\"\" Get the current :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns grid object or", "= True def output_file(filename, title=\"Bokeh Plot\", autosave=True, mode=\"inline\", root_dir=None): \"\"\" Outputs to a", "called, nothing saved\") return if not resources: warnings.warn(\"save() called but no resources was", "replaces any existing default Server session \"\"\" output_server(docname, session=Cloud()) def output_notebook(url=None, docname=None, session=None,", "Generate a plot that arranges several subplots into a grid. Args: plot_arrangement (list[:class:`Plot", "cursession()) displaypub.publish_display_data('bokeh', {'text/html': snippet}) elif notebook: import IPython.core.displaypub as displaypub displaypub.publish_display_data('bokeh', {'text/html': notebook_div(plot)})", "y items. Args: *args : The data to plot. Can be of several", "\"\"\" Generate a plot that arranges several subplots into a grid. Args: plot_arrangement", "diamond_cross = _doc_wrap(gf.diamond_cross) image = _doc_wrap(gf.image) image_rgba = _doc_wrap(gf.image_rgba) image_url = _doc_wrap(gf.image_url) inverted_triangle", "root_dir=None): \"\"\" Outputs to a static HTML file. .. note:: This file will", "as gf from .document import Document from .embed import notebook_div, file_html, autoload_server from", "the global module level document \"\"\" from flask import request doc = request.bokeh_server_document", "y-grid objects on the current plot \"\"\" p = curplot() if p is", "has been called, this will save the plot to the given filename. Args:", "hold status on the current document. This is a convenience function that acts", "Return the current document. Returns: doc : the current default document object. 
'''", "_doc_wrap(gf.quad) quadratic = _doc_wrap(gf.quadratic) ray = _doc_wrap(gf.ray) rect = _doc_wrap(gf.rect) segment = _doc_wrap(gf.segment)", "Bokeh server An existing documents with the same name will be overwritten. ..", "plot (or None) ''' return curdoc().curplot() def cursession(): ''' Return the current session,", "the given filename. Args: filename (str, optional) : filename to save document under", "object or splattable list of axis objects on the current plot \"\"\" p", "p4]] Returns: grid_plot: the current :class:`GridPlot <bokeh.objects.GridPlot>` \"\"\" grid = GridPlot(children=plot_arrangement) if name:", "= curplot() if not plot: warnings.warn(\"No current plot to show. Use renderer functions", "# so they don't show up twice subplots = itertools.chain.from_iterable(plot_arrangement) curdoc().get_context().children = list(set(curdoc().get_context().children)", "Return the current default plot object. Returns: plot : the current default plot", "get_default_color, get_default_alpha, _handle_1d_data_args, _list_attr_splat ) from .resources import Resources from .session import Cloud,", "is 'tab', then opens a new tab. If **new** is 'window', then opens", "the path to the various static files should be computed. .. note:: Generally,", "_default_session = None _default_file = None _default_notebook = None def curdoc(): ''' Return", "or splattable list of y-axis objects on the current plot \"\"\" p =", "import browserlib from . import _glyph_functions as gf from .document import Document from", "_doc_wrap(gf.multi_line) oval = _doc_wrap(gf.oval) patch = _doc_wrap(gf.patch) patches = _doc_wrap(gf.patches) quad = _doc_wrap(gf.quad)", "the plotcontext, # so they don't show up twice subplots = itertools.chain.from_iterable(plot_arrangement) curdoc().get_context().children", "the current document. If a filename is supplied, or output_file(...) 
has been called,", "None: return None axis = [obj for obj in p.renderers if isinstance(obj, Axis)", "( get_default_color, get_default_alpha, _handle_1d_data_args, _list_attr_splat ) from .resources import Resources from .session import", "default session url (str, optianal) : URL of the Bokeh server (default: \"default\")", "object. ''' try: \"\"\"This is used when we need to call the plotting", "been called, this will save the plot to the given filename. Args: filename", "import uuid import warnings from . import browserlib from . import _glyph_functions as", "Returns: None .. note:: Generally, this should be called at the beginning of", "mode (default: \"tab\") For file-based output, opens or raises the browser window showing", "url if not session: if not _default_session: _default_session = Session(name=name, root_url=url) session =", "the current plot (for file/server output modes) or displaying it in an output", "BokehJS resource config to use if `resources` is None, the current default resource", "was supplied and output_file(...) was never called, nothing saved\") return if not resources:", "is True, then every time plot() or one of the other visual functions", "on the current document. This is a convenience function that acts on the", "session.load_document(curdoc()) def output_cloud(docname): \"\"\" Cause plotting commands to automatically persist plots to the", "else None session = cursession() notebook = _default_notebook # Map our string argument", "plot (see http://bokeh.pydata.org/index.html)\") return if notebook and session: import IPython.core.displaypub as displaypub push(session=session)", "import brewer from .plotting_helpers import ( get_default_color, get_default_alpha, _handle_1d_data_args, _list_attr_splat ) from .resources", "x-axis objects on the current plot \"\"\" return _list_attr_splat(xaxis() + yaxis()) def legend():", "plots to a Bokeh server. 
Can use explicitly provided Session for persistence, or", "Set or clear the plot hold status on the current document. This is", "(see http://bokeh.pydata.org/index.html)\") return html = file_html(curdoc(), resources, _default_file['title']) with open(filename, \"w\") as f:", "\"\"\" return _list_attr_splat(xaxis() + yaxis()) def legend(): \"\"\" Get the current :class:`legend <bokeh.objects.Legend>`", "= _default_notebook # Map our string argument to the webbrowser.open argument new_param =", "(Resources, optional) : BokehJS resource config to use if `resources` is None, the", "Get the current `x` axis object(s) Returns: Returns x-axis object or splattable list", "curplot()? if not len(_color_fields.intersection(set(kwargs.keys()))): kwargs['color'] = get_default_color() if not len(_alpha_fields.intersection(set(kwargs.keys()))): kwargs['alpha'] = get_default_alpha()", "plot, by auto-raising the window or tab displaying the current plot (for file/server", "create a current plot (see http://bokeh.pydata.org/index.html)\") return html = file_html(curdoc(), resources, _default_file['title']) with", "\"line_color\"]) _alpha_fields = set([\"alpha\", \"fill_alpha\", \"line_alpha\"]) def scatter(*args, **kwargs): \"\"\" Creates a scatter", "session.DEFAULT_SERVER_URL name (str, optional) : if name is None, use the server URL", "grid_plot: the current :class:`GridPlot <bokeh.objects.GridPlot>` \"\"\" grid = GridPlot(children=plot_arrangement) if name: grid._id =", "type '%s'. Use markers() to see a list of valid marker types.\" %", "(XNAME, YNAME) Two bokeh DataSource/ColumnsRef marker (str, optional): a valid marker_type, defaults to", ".document import Document from .embed import notebook_div, file_html, autoload_server from .objects import Axis,", "to the Bokeh cloud server. Args: docname (str) : name of document to", "scatter plot of the given x and y items. 
Args: *args : The", "= \"IPython Session at %s\" % time.ctime() output_server(docname, url=url, session=session, name=name) else: from", "the current plot \"\"\" return _list_attr_splat(xaxis() + yaxis()) def legend(): \"\"\" Get the", "server An existing documents with the same name will be overwritten. session (Session,", "None ''' curdoc().hold(value) def figure(**kwargs): ''' Activate a new figure for plotting. All", "= _doc_wrap(gf.annulus) arc = _doc_wrap(gf.arc) asterisk = _doc_wrap(gf.asterisk) bezier = _doc_wrap(gf.bezier) circle =", "(ImportError, RuntimeError, AttributeError): return _default_document def curplot(): ''' Return the current default plot", "supplied and output_file(...) was never called, nothing saved\") return if not resources: warnings.warn(\"save()", ":class:`legend <bokeh.objects.Legend>` object(s) Returns: Returns legend object or splattable list of legend objects", "if isinstance(obj, Legend)] return _list_attr_splat(legends) def xgrid(): \"\"\" Get the current `x` :class:`grid", "this for example) in this case you still want the API to work", "(for file/server output modes) or displaying it in an output cell (IPython notebook).", "the plot_arrangement and remove them from the plotcontext, # so they don't show", "and obj.dimension==0] return _list_attr_splat(grid) def ygrid(): \"\"\" Get the current `y` :class:`grid <bokeh.objects.Grid>`", "name (str) : name for this plot .. 
note:: `plot_arrangement` can be nested,", "+ yaxis()) def legend(): \"\"\" Get the current :class:`legend <bokeh.objects.Legend>` object(s) Returns: Returns", "if cursession() and curdoc()._autostore: push() if _default_file and _default_file['autosave']: save() return retval wrapper.__doc__", "wrapper(*args, **kwargs): retval = func(curdoc(), *args, **kwargs) if cursession() and curdoc()._autostore: push() if", "note:: This file will be overwritten each time show() or save() is invoked.", "save() return grid def xaxis(): \"\"\" Get the current axis objects Returns: Returns", "not filename: warnings.warn(\"save() called but no filename was supplied and output_file(...) was never", "(circle, rect, etc.) to create a current plot (see http://bokeh.pydata.org/index.html)\") return if notebook", "want the API to work but you don't want to use the global", "the file is only saved upon calling show(). mode (str, optional) : how", "on the current plot \"\"\" return _list_attr_splat(xgrid() + ygrid()) def load_object(obj): \"\"\"updates object", "= func(curdoc(), *args, **kwargs) if cursession() and curdoc()._autostore: push() if _default_file and _default_file['autosave']:", "wraps import itertools import time import logging import os import uuid import warnings", "called, nothing saved\") return if not curplot(): warnings.warn(\"No current plot to save. Use", "'relative(-dev)' case, **root_dir** can be specified to indicate the base directory from which", "automatically persist plots to the Bokeh cloud server. Args: docname (str) : name", "the server with the data for the current document. Args: session (Sesion, optional)", "Returns y-grid object or splattable list of y-grid objects on the current plot", "is None, use the default session url (str, optianal) : URL of the", "or splattable list of grid objects on the current plot \"\"\" return _list_attr_splat(xgrid()", "Args: *args : The data to plot. Can be of several forms: (X,", "only saved upon calling show(). 
mode (str, optional) : how to inlude BokehJS", "will save the plot to the given filename. Args: filename (str, optional) :", "_doc_wrap(gf.ray) rect = _doc_wrap(gf.rect) segment = _doc_wrap(gf.segment) square = _doc_wrap(gf.square) square_cross = _doc_wrap(gf.square_cross)", "the beginning of an interactive session or the top of a script. ..", "at %s\" % time.ctime() output_server(docname, url=url, session=session, name=name) else: from . import load_notebook", "\"line_alpha\"]) def scatter(*args, **kwargs): \"\"\" Creates a scatter plot of the given x", "can be 'inline', 'cdn', 'relative(-dev)' or 'absolute(-dev)'. In the 'relative(-dev)' case, **root_dir** can", "cross, \"diamond\": diamond, \"diamond_cross\": diamond_cross, \"inverted_triangle\": inverted_triangle, \"square\": square, \"square_x\": square_x, \"square_cross\": square_cross,", "circle_x, \"cross\": cross, \"diamond\": diamond, \"diamond_cross\": diamond_cross, \"inverted_triangle\": inverted_triangle, \"square\": square, \"square_x\": square_x,", "\"data2\", source=data_source, ...) \"\"\" ds = kwargs.get(\"source\", None) names, datasource = _handle_1d_data_args(args, datasource=ds)", "displaying it in an output cell (IPython notebook). Args: browser (str, optional) :", "a new figure for plotting. All subsequent plotting operations will affect the new", ": name of document to push on Bokeh server An existing documents with", "should be computed. .. note:: Generally, this should be called at the beginning", ">>> scatter([1,2,3],[4,5,6], fill_color=\"red\") >>> scatter(\"data1\", \"data2\", source=data_source, ...) \"\"\" ds = kwargs.get(\"source\", None)", "use explicitly provided Session for persistence, or the default session. Args: docname (str)", "document=None): \"\"\" Updates the server with the data for the current document. Args:", "**base_url** can also be supplied. Returns: None .. note:: Generally, this should be", "is False, then the file is only saved upon calling show(). 
mode (str,", "Two bokeh DataSource/ColumnsRef marker (str, optional): a valid marker_type, defaults to \"circle\" color", "grid = GridPlot(children=plot_arrangement) if name: grid._id = name # Walk the plot_arrangement and", "scatter(*args, **kwargs): \"\"\" Creates a scatter plot of the given x and y", "several forms: (X, Y) Two 1D arrays or iterables (XNAME, YNAME) Two bokeh", "\"\"\" Cause plotting commands to automatically persist plots to a Bokeh server. Can", "is None, the current default document is pushed Returns: None \"\"\" if not", "current document, and is equivalent to curdoc().%s(...)\" % func.__name__ return wrapper annular_wedge =", "marker (str, optional): a valid marker_type, defaults to \"circle\" color (color value, optional):", "func(curdoc(), *args, **kwargs) if cursession() and curdoc()._autostore: push() if _default_file and _default_file['autosave']: save()", "import time import logging import os import uuid import warnings from . import", "tab. If **new** is 'window', then opens a new window. \"\"\" filename =", "plot : the current default plot (or None) ''' return curdoc().curplot() def cursession():", "the current `y` axis object(s) Returns: Returns y-axis object or splattable list of", "to indicate the base directory from which the path to the various static", "flask request\") return doc except (ImportError, RuntimeError, AttributeError): return _default_document def curplot(): '''", "retval wrapper.__doc__ += \"\\nThis is a convenience function that acts on the current", "resources, _default_file['title']) with open(filename, \"w\") as f: f.write(html) def push(session=None, document=None): \"\"\" Updates", "'title' : title, } if os.path.isfile(filename): print(\"Session output file '%s' already exists, will", "curplot() if not plot: warnings.warn(\"No current plot to show. Use renderer functions (circle,", "is 'window', then opens a new window. 
\"\"\" filename = _default_file['filename'] if _default_file", "if not plot: warnings.warn(\"No current plot to show. Use renderer functions (circle, rect,", "ds = kwargs.get(\"source\", None) names, datasource = _handle_1d_data_args(args, datasource=ds) kwargs[\"source\"] = datasource markertype", "automatically persist plots to a Bokeh server. Can use explicitly provided Session for", "import warnings from . import browserlib from . import _glyph_functions as gf from", "never called, nothing saved\") return if not curplot(): warnings.warn(\"No current plot to save.", "the current document, and is equivalent to curdoc().hold(...) Args: value (bool, optional) :", "will be overwritten each time show() or save() is invoked. Args: autosave (bool,", "segment = _doc_wrap(gf.segment) square = _doc_wrap(gf.square) square_cross = _doc_wrap(gf.square_cross) square_x = _doc_wrap(gf.square_x) text", "into a grid. Args: plot_arrangement (list[:class:`Plot <bokeh.objects.Plot>`]) : plots to arrange in a", "object or splattable list of x-axis objects on the current plot \"\"\" return", "import Cloud, DEFAULT_SERVER_URL, Session logger = logging.getLogger(__name__) _default_document = Document() _default_session = None", "show up twice subplots = itertools.chain.from_iterable(plot_arrangement) curdoc().get_context().children = list(set(curdoc().get_context().children) - set(subplots)) curdoc().add(grid) curdoc()._current_plot", "True, then every time plot() or one of the other visual functions is", "context. (Applets do this for example) in this case you still want the", "ygrid(): \"\"\" Get the current `y` :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns y-grid object", "top of a script. .. note:: Calling this function will replaces any existing", "and **base_url** can also be supplied. Returns: None .. 
note:: Generally, this should", "obj.dimension==0] return _list_attr_splat(grid) def ygrid(): \"\"\" Get the current `y` :class:`grid <bokeh.objects.Grid>` object(s)", "if p is None: return None axis = [obj for obj in p.renderers", "session is used if present document (Document, optional) : BokehJS document to push", "browser to show with (default: None) For systems that support it, the **browser**", ": BokehJS resource config to use if `resources` is None, the current default", "output_server(docname, url=url, session=session, name=name) else: from . import load_notebook load_notebook() global _default_notebook _default_notebook", "Bokeh server (default: \"default\") if url is \"default\" use session.DEFAULT_SERVER_URL name (str, optional)", "if not len(_color_fields.intersection(set(kwargs.keys()))): kwargs['color'] = get_default_color() if not len(_alpha_fields.intersection(set(kwargs.keys()))): kwargs['alpha'] = get_default_alpha() if", "the browser window showing the current output file. If **new** is 'tab', then", "None, the current output_file(...) filename is used if present resources (Resources, optional) :", "<bokeh.objects.GridPlot>` \"\"\" grid = GridPlot(children=plot_arrangement) if name: grid._id = name # Walk the", "square = _doc_wrap(gf.square) square_cross = _doc_wrap(gf.square_cross) square_x = _doc_wrap(gf.square_x) text = _doc_wrap(gf.text) triangle", "obj.dimension==1] return _list_attr_splat(axis) def axis(): \"\"\" Get the current `x` axis object(s) Returns:", "or the top of a script. \"\"\" global _default_file _default_file = { 'filename'", "will be overwritten. session (Session, optional) : An explicit session to use (default:", "resource config to use if `resources` is None, the current default resource config", "Bokeh server. 
Can use explicitly provided Session for persistence, or the default session.", "filename = _default_file['filename'] if resources is None and _default_file: resources = _default_file['resources'] if", "is None, use the server URL as the name Additional keyword arguments like", "(Sesion, optional) : filename to save document under (default: None) if `sessiokn` is", "bokeh DataSource/ColumnsRef marker (str, optional): a valid marker_type, defaults to \"circle\" color (color", "list of legend objects on the current plot \"\"\" p = curplot() if", "optional) : how to inlude BokehJS (default: \"inline\") **mode** can be 'inline', 'cdn',", "_default_file['autosave']: save() return grid def xaxis(): \"\"\" Get the current axis objects Returns:", "session to use (default: None) If session is None, use the default session", "= curplot() if p is None: return None axis = [obj for obj", "optional): shorthand to set both fill and line color All the :ref:`userguide_line_properties` and", "argument allows specifying which browser to display in, e.g. \"safari\", \"firefox\", \"opera\", \"windows-default\".", "window. \"\"\" filename = _default_file['filename'] if _default_file else None session = cursession() notebook", "clear the plot hold status on the current document. This is a convenience", "def push(session=None, document=None): \"\"\" Updates the server with the data for the current", "same name will be overwritten. session (Session, optional) : An explicit session to", "= _default_file['resources'] if not filename: warnings.warn(\"save() called but no filename was supplied and", "API from within the server, within a request context. 
(Applets do this for", "cursession() and curdoc()._autostore: push() if _default_file and _default_file['autosave']: save() return retval wrapper.__doc__ +=", "docname is None: docname = \"IPython Session at %s\" % time.ctime() output_server(docname, url=url,", "new=new_param) else: controller.open(session.object_link(curdoc()._plotcontext)) elif filename: save(filename) controller.open(\"file://\" + os.path.abspath(filename), new=new_param) def save(filename=None, resources=None):", "current plot \"\"\" p = curplot() if p is None: return None axis", "\"\"\" if filename is None and _default_file: filename = _default_file['filename'] if resources is", "no filename was supplied and output_file(...) was never called, nothing saved\") return if", "axis objects on the current plot \"\"\" p = curplot() if p is", "push(session=session) snippet = autoload_server(plot, cursession()) displaypub.publish_display_data('bokeh', {'text/html': snippet}) elif notebook: import IPython.core.displaypub as", "several subplots into a grid. Args: plot_arrangement (list[:class:`Plot <bokeh.objects.Plot>`]) : plots to arrange", "this should be called at the beginning of an interactive session or the", "or tab displaying the current plot (for file/server output modes) or displaying it", "color All the :ref:`userguide_line_properties` and :ref:`userguide_fill_properties` are also accepted as keyword parameters. Examples:", "= _doc_wrap(gf.quadratic) ray = _doc_wrap(gf.ray) rect = _doc_wrap(gf.rect) segment = _doc_wrap(gf.segment) square =", "used when we need to call the plotting API from within the server,", "ValueError(\"Invalid marker type '%s'. Use markers() to see a list of valid marker", "argument to the webbrowser.open argument new_param = {'tab': 2, 'window': 1}[new] controller =", "save document under (default: None) if `filename` is None, the current output_file(...) 
filename", "if not _default_session: _default_session = Session(name=name, root_url=url) session = _default_session session.use_doc(docname) session.load_document(curdoc()) def", "if url: controller.open(url, new=new_param) else: controller.open(session.object_link(curdoc()._plotcontext)) elif filename: save(filename) controller.open(\"file://\" + os.path.abspath(filename), new=new_param)", "f.write(html) def push(session=None, document=None): \"\"\" Updates the server with the data for the", "controller.open(\"file://\" + os.path.abspath(filename), new=new_param) def save(filename=None, resources=None): \"\"\" Updates the file with the", "p is None: return None grid = [obj for obj in p.renderers if", "a Bokeh server. Can use explicitly provided Session for persistence, or the default", "obj.dimension==0] return _list_attr_splat(axis) def yaxis(): \"\"\" Get the current `y` axis object(s) Returns:", "if p is None: return None legends = [obj for obj in p.renderers", "output_file(...) has been called, this will save the plot to the given filename.", "show() or save() is invoked. Args: autosave (bool, optional) : whether to automatically", "or the default session. Args: docname (str) : name of document to push", "like **username**, **userapikey**, and **base_url** can also be supplied. Returns: None .. note::", "return if not curplot(): warnings.warn(\"No current plot to save. Use renderer functions (circle,", "import Resources from .session import Cloud, DEFAULT_SERVER_URL, Session logger = logging.getLogger(__name__) _default_document =", "a script. 
\"\"\" global _default_file _default_file = { 'filename' : filename, 'resources' :", "import load_notebook load_notebook() global _default_notebook _default_notebook = True def output_file(filename, title=\"Bokeh Plot\", autosave=True,", "return grid def xaxis(): \"\"\" Get the current axis objects Returns: Returns axis", "_list_attr_splat ) from .resources import Resources from .session import Cloud, DEFAULT_SERVER_URL, Session logger", "Args: filename (str, optional) : filename to save document under (default: None) if", "base directory from which the path to the various static files should be", "_doc_wrap(gf.patches) quad = _doc_wrap(gf.quad) quadratic = _doc_wrap(gf.quadratic) ray = _doc_wrap(gf.ray) rect = _doc_wrap(gf.rect)", "request\") return doc except (ImportError, RuntimeError, AttributeError): return _default_document def curplot(): ''' Return", "show. Use renderer functions (circle, rect, etc.) to create a current plot (see", "no resources was supplied and output_file(...) was never called, nothing saved\") return if", "to set both fill and line color All the :ref:`userguide_line_properties` and :ref:`userguide_fill_properties` are", "save the plot to the given filename. Args: filename (str, optional) : filename", "config to use if `resources` is None, the current default resource config is", "# Walk the plot_arrangement and remove them from the plotcontext, # so they", "_doc_wrap(gf.square_cross) square_x = _doc_wrap(gf.square_x) text = _doc_wrap(gf.text) triangle = _doc_wrap(gf.triangle) wedge = _doc_wrap(gf.wedge)", ": filename to save document under (default: None) if `filename` is None, the", "curdoc().%s(...)\" % func.__name__ return wrapper annular_wedge = _doc_wrap(gf.annular_wedge) annulus = _doc_wrap(gf.annulus) arc =", "data for the current document. 
Args: session (Sesion, optional) : filename to save", "Cloud, DEFAULT_SERVER_URL, Session logger = logging.getLogger(__name__) _default_document = Document() _default_session = None _default_file", "session=Cloud()) def output_notebook(url=None, docname=None, session=None, name=None): if session or url or name: if", "parameters. Returns: None ''' curdoc().figure(**kwargs) def output_server(docname, session=None, url=\"default\", name=None): \"\"\" Cause plotting", "_default_notebook _default_notebook = True def output_file(filename, title=\"Bokeh Plot\", autosave=True, mode=\"inline\", root_dir=None): \"\"\" Outputs", "Prints a list of valid marker types for scatter() Returns: None \"\"\" print(list(sorted(_marker_types.keys())))", "for persistence, or the default session. Args: docname (str) : name of document", "plot to save. Use renderer functions (circle, rect, etc.) to create a current", "global _default_session if url == \"default\": url = DEFAULT_SERVER_URL if name is None:", "\"\"\" p = curplot() if p is None: return None axis = [obj", "p is None: return None axis = [obj for obj in p.renderers if", "case, **root_dir** can be specified to indicate the base directory from which the", "set both fill and line color All the :ref:`userguide_line_properties` and :ref:`userguide_fill_properties` are also", ". 
import load_notebook load_notebook() global _default_notebook _default_notebook = True def output_file(filename, title=\"Bokeh Plot\",", "obj in p.renderers if isinstance(obj, Axis) and obj.dimension==1] return _list_attr_splat(axis) def axis(): \"\"\"", "output_server(docname, session=Cloud()) def output_notebook(url=None, docname=None, session=None, name=None): if session or url or name:", "TODO (bev) don't use private attrs if _default_session: push() if _default_file and _default_file['autosave']:", "the current default resource config is used Returns: None \"\"\" if filename is", "+ os.path.abspath(filename), new=new_param) def save(filename=None, resources=None): \"\"\" Updates the file with the data", "valid marker types for scatter() Returns: None \"\"\" print(list(sorted(_marker_types.keys()))) _color_fields = set([\"color\", \"fill_color\",", "try: \"\"\"This is used when we need to call the plotting API from", "autoload_server from .objects import Axis, ColumnDataSource, Glyph, Grid, GridPlot, Legend from .palettes import", "the top of a script. .. note:: Calling this function will replaces any", "autosave=True, mode=\"inline\", root_dir=None): \"\"\" Outputs to a static HTML file. .. note:: This", "Creates a scatter plot of the given x and y items. Args: *args", "x and y items. Args: *args : The data to plot. Can be", "Get the current `y` :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns y-grid object or splattable", "don't show up twice subplots = itertools.chain.from_iterable(plot_arrangement) curdoc().get_context().children = list(set(curdoc().get_context().children) - set(subplots)) curdoc().add(grid)", "exists, will be overwritten.\" % filename) def show(browser=None, new=\"tab\", url=None): \"\"\" 'shows' the", "(default: True) If **autosave** is True, then every time plot() or one of", "session. 
Args: docname (str) : name of document to push on Bokeh server", "None) ''' return _default_session def hold(value=True): ''' Set or clear the plot hold", "diamond_cross, \"inverted_triangle\": inverted_triangle, \"square\": square, \"square_x\": square_x, \"square_cross\": square_cross, \"triangle\": triangle, \"x\": x,", "not in _marker_types: raise ValueError(\"Invalid marker type '%s'. Use markers() to see a", "warnings from . import browserlib from . import _glyph_functions as gf from .document", "list of axis objects on the current plot \"\"\" p = curplot() if", "Server session \"\"\" output_server(docname, session=Cloud()) def output_notebook(url=None, docname=None, session=None, name=None): if session or", "return html = file_html(curdoc(), resources, _default_file['title']) with open(filename, \"w\") as f: f.write(html) def", "new figure. This function accepts all plot style keyword parameters. Returns: None '''", "Use renderer functions (circle, rect, etc.) to create a current plot (see http://bokeh.pydata.org/index.html)\")", "return None legends = [obj for obj in p.renderers if isinstance(obj, Legend)] return", "session=None, name=None): if session or url or name: if docname is None: docname", "except (ImportError, RuntimeError, AttributeError): return _default_document def curplot(): ''' Return the current default", "for obj in p.renderers if isinstance(obj, Axis) and obj.dimension==0] return _list_attr_splat(axis) def yaxis():", "resources = _default_file['resources'] if not filename: warnings.warn(\"save() called but no filename was supplied", "How to handle this? Just call curplot()? 
if not len(_color_fields.intersection(set(kwargs.keys()))): kwargs['color'] = get_default_color()", "= _doc_wrap(gf.square_x) text = _doc_wrap(gf.text) triangle = _doc_wrap(gf.triangle) wedge = _doc_wrap(gf.wedge) x =", "def output_server(docname, session=None, url=\"default\", name=None): \"\"\" Cause plotting commands to automatically persist plots", "the other visual functions is called, this causes the file to be saved.", "a scatter plot of the given x and y items. Args: *args :", "scatter(\"data1\", \"data2\", source=data_source, ...) \"\"\" ds = kwargs.get(\"source\", None) names, datasource = _handle_1d_data_args(args,", "resources (Resources, optional) : BokehJS resource config to use if `resources` is None,", "beginning of an interactive session or the top of a script. \"\"\" global", "cross = _doc_wrap(gf.cross) diamond = _doc_wrap(gf.diamond) diamond_cross = _doc_wrap(gf.diamond_cross) image = _doc_wrap(gf.image) image_rgba", "never called, nothing pushed\") def _doc_wrap(func): @wraps(func) def wrapper(*args, **kwargs): retval = func(curdoc(),", "the current document. Args: session (Sesion, optional) : filename to save document under", "'inline', 'cdn', 'relative(-dev)' or 'absolute(-dev)'. In the 'relative(-dev)' case, **root_dir** can be specified", "the same name will be overwritten. .. 
note:: Generally, this should be called", "_doc_wrap(gf.bezier) circle = _doc_wrap(gf.circle) circle_cross = _doc_wrap(gf.circle_cross) circle_x = _doc_wrap(gf.circle_x) cross = _doc_wrap(gf.cross)", "called, nothing pushed\") def _doc_wrap(func): @wraps(func) def wrapper(*args, **kwargs): retval = func(curdoc(), *args,", "= curplot() if p is None: return None grid = [obj for obj", "curdoc()._autostore: push() if _default_file and _default_file['autosave']: save() return retval wrapper.__doc__ += \"\\nThis is", "def grid(): \"\"\" Get the current :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns grid object", "curplot(): warnings.warn(\"No current plot to save. Use renderer functions (circle, rect, etc.) to", "In the 'relative(-dev)' case, **root_dir** can be specified to indicate the base directory", "f: f.write(html) def push(session=None, document=None): \"\"\" Updates the server with the data for", "curdoc()._current_plot = grid # TODO (bev) don't use private attrs if _default_session: push()", "if not len(_alpha_fields.intersection(set(kwargs.keys()))): kwargs['alpha'] = get_default_alpha() if markertype not in _marker_types: raise ValueError(\"Invalid", "grid def xaxis(): \"\"\" Get the current axis objects Returns: Returns axis object", "pushed Returns: None \"\"\" if not session: session = cursession() if not document:", "_default_session if url == \"default\": url = DEFAULT_SERVER_URL if name is None: name", "(X, Y) Two 1D arrays or iterables (XNAME, YNAME) Two bokeh DataSource/ColumnsRef marker", "= _doc_wrap(gf.wedge) x = _doc_wrap(gf.x) _marker_types = { \"asterisk\": asterisk, \"circle\": circle, \"circle_cross\":", "\"safari\", \"firefox\", \"opera\", \"windows-default\". 
(See the webbrowser module documentation in the standard lib", "and obj.dimension==0] return _list_attr_splat(axis) def yaxis(): \"\"\" Get the current `y` axis object(s)", "return _list_attr_splat(xgrid() + ygrid()) def load_object(obj): \"\"\"updates object from the server \"\"\" cursession().load_object(obj,", "plot. Can be of several forms: (X, Y) Two 1D arrays or iterables", "if not filename: warnings.warn(\"save() called but no filename was supplied and output_file(...) was", "\"\"\" Get the current `y` axis object(s) Returns: Returns y-axis object or splattable", "def xaxis(): \"\"\" Get the current axis objects Returns: Returns axis object or", "\"default\") if url is \"default\" use session.DEFAULT_SERVER_URL name (str, optional) : if name", "x = _doc_wrap(gf.x) _marker_types = { \"asterisk\": asterisk, \"circle\": circle, \"circle_cross\": circle_cross, \"circle_x\":", "current document, and is equivalent to curdoc().hold(...) Args: value (bool, optional) : whether", "it is False, then the file is only saved upon calling show(). mode", "_default_session def hold(value=True): ''' Set or clear the plot hold status on the", "\"asterisk\": asterisk, \"circle\": circle, \"circle_cross\": circle_cross, \"circle_x\": circle_x, \"cross\": cross, \"diamond\": diamond, \"diamond_cross\":", "p.renderers if isinstance(obj, Axis) and obj.dimension==1] return _list_attr_splat(axis) def axis(): \"\"\" Get the", "Session at %s\" % time.ctime() output_server(docname, url=url, session=session, name=name) else: from . 
import", "types for scatter() Returns: None \"\"\" print(list(sorted(_marker_types.keys()))) _color_fields = set([\"color\", \"fill_color\", \"line_color\"]) _alpha_fields", "return None grid = [obj for obj in p.renderers if isinstance(obj, Grid) and", "_default_file: filename = _default_file['filename'] if resources is None and _default_file: resources = _default_file['resources']", "curdoc().figure(**kwargs) def output_server(docname, session=None, url=\"default\", name=None): \"\"\" Cause plotting commands to automatically persist", "is None and _default_file: resources = _default_file['resources'] if not filename: warnings.warn(\"save() called but", "a request context. (Applets do this for example) in this case you still", "return _list_attr_splat(grid) def grid(): \"\"\" Get the current :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns", "Returns y-axis object or splattable list of y-axis objects on the current plot", "a list of valid marker types.\" % markertype) return _marker_types[markertype](*args, **kwargs) def gridplot(plot_arrangement,", "square_cross = _doc_wrap(gf.square_cross) square_x = _doc_wrap(gf.square_x) text = _doc_wrap(gf.text) triangle = _doc_wrap(gf.triangle) wedge", "session is None, use the default session url (str, optianal) : URL of", "function that acts on the current document, and is equivalent to curdoc().%s(...)\" %", "wedge = _doc_wrap(gf.wedge) x = _doc_wrap(gf.x) _marker_types = { \"asterisk\": asterisk, \"circle\": circle,", "plot .. note:: `plot_arrangement` can be nested, e.g [[p1, p2], [p3, p4]] Returns:", "None, the current default document is pushed Returns: None \"\"\" if not session:", "both fill and line color All the :ref:`userguide_line_properties` and :ref:`userguide_fill_properties` are also accepted", "etc.) to create a current plot (see http://bokeh.pydata.org/index.html)\") return if notebook and session:", "Bokeh cloud server. 
Args: docname (str) : name of document to push on", "Args: autosave (bool, optional) : whether to automatically save (default: True) If **autosave**", "can also be supplied. Returns: None .. note:: Generally, this should be called", "use if `resources` is None, the current default resource config is used Returns:", "object(s) Returns: Returns grid object or splattable list of grid objects on the", "save (default: True) If **autosave** is True, then every time plot() or one", "arranges several subplots into a grid. Args: plot_arrangement (list[:class:`Plot <bokeh.objects.Plot>`]) : plots to", "to handle this? Just call curplot()? if not len(_color_fields.intersection(set(kwargs.keys()))): kwargs['color'] = get_default_color() if", "optional) : if name is None, use the server URL as the name", "os.path.isfile(filename): print(\"Session output file '%s' already exists, will be overwritten.\" % filename) def", "the default session. Args: docname (str) : name of document to push on", "(str, optional) : filename to save document under (default: None) if `filename` is", "the data for the current document. Args: session (Sesion, optional) : filename to", "def xgrid(): \"\"\" Get the current `x` :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns legend", "p.renderers if isinstance(obj, Legend)] return _list_attr_splat(legends) def xgrid(): \"\"\" Get the current `x`", "this function will replaces any existing default Server session \"\"\" global _default_session if", "persist plots to a Bokeh server. Can use explicitly provided Session for persistence,", "name will be overwritten. session (Session, optional) : An explicit session to use", "wrapper.__doc__ += \"\\nThis is a convenience function that acts on the current document,", "p2], [p3, p4]] Returns: grid_plot: the current :class:`GridPlot <bokeh.objects.GridPlot>` \"\"\" grid = GridPlot(children=plot_arrangement)", "(IPython notebook). 
Args: browser (str, optional) : browser to show with (default: None)", "save() is invoked. Args: autosave (bool, optional) : whether to automatically save (default:", "for obj in p.renderers if isinstance(obj, Legend)] return _list_attr_splat(legends) def xgrid(): \"\"\" Get", "or one of the other visual functions is called, this causes the file", "or iterables (XNAME, YNAME) Two bokeh DataSource/ColumnsRef marker (str, optional): a valid marker_type,", "(default: \"default\") if url is \"default\" use session.DEFAULT_SERVER_URL name (str, optional) : if", "used if present resources (Resources, optional) : BokehJS resource config to use if", "_doc_wrap(gf.wedge) x = _doc_wrap(gf.x) _marker_types = { \"asterisk\": asterisk, \"circle\": circle, \"circle_cross\": circle_cross,", "or save() is invoked. Args: autosave (bool, optional) : whether to automatically save", "twice subplots = itertools.chain.from_iterable(plot_arrangement) curdoc().get_context().children = list(set(curdoc().get_context().children) - set(subplots)) curdoc().add(grid) curdoc()._current_plot = grid", "(str, optional) : how to inlude BokehJS (default: \"inline\") **mode** can be 'inline',", "figure(**kwargs): ''' Activate a new figure for plotting. All subsequent plotting operations will", "= _doc_wrap(gf.arc) asterisk = _doc_wrap(gf.asterisk) bezier = _doc_wrap(gf.bezier) circle = _doc_wrap(gf.circle) circle_cross =", "our string argument to the webbrowser.open argument new_param = {'tab': 2, 'window': 1}[new]", "style keyword parameters. 
Returns: None ''' curdoc().figure(**kwargs) def output_server(docname, session=None, url=\"default\", name=None): \"\"\"", "print_function from functools import wraps import itertools import time import logging import os", "- set(subplots)) curdoc().add(grid) curdoc()._current_plot = grid # TODO (bev) don't use private attrs", "None legends = [obj for obj in p.renderers if isinstance(obj, Legend)] return _list_attr_splat(legends)", "datasource markertype = kwargs.get(\"marker\", \"circle\") # TODO: How to handle this? Just call", "p is None: return None legends = [obj for obj in p.renderers if", "there is one. Returns: session : the current default session object (or None)", "\"\"\" Updates the server with the data for the current document. Args: session", "diamond, \"diamond_cross\": diamond_cross, \"inverted_triangle\": inverted_triangle, \"square\": square, \"square_x\": square_x, \"square_cross\": square_cross, \"triangle\": triangle,", "For file-based output, opens or raises the browser window showing the current output", "from the plotcontext, # so they don't show up twice subplots = itertools.chain.from_iterable(plot_arrangement)", "document \"\"\" from flask import request doc = request.bokeh_server_document logger.debug(\"returning config from flask", "fill_color=\"red\") >>> scatter(\"data1\", \"data2\", source=data_source, ...) \"\"\" ds = kwargs.get(\"source\", None) names, datasource", "def curplot(): ''' Return the current default plot object. Returns: plot : the", "} def markers(): \"\"\" Prints a list of valid marker types for scatter()", "a convenience function that acts on the current document, and is equivalent to", "server An existing documents with the same name will be overwritten. .. 
note::", "session \"\"\" output_server(docname, session=Cloud()) def output_notebook(url=None, docname=None, session=None, name=None): if session or url", "[obj for obj in p.renderers if isinstance(obj, Grid) and obj.dimension==0] return _list_attr_splat(grid) def", "tab displaying the current plot (for file/server output modes) or displaying it in", "markertype not in _marker_types: raise ValueError(\"Invalid marker type '%s'. Use markers() to see", "DEFAULT_SERVER_URL if name is None: name = url if not session: if not", "current document. This is a convenience function that acts on the current document,", "{'tab': 2, 'window': 1}[new] controller = browserlib.get_browser_controller(browser=browser) plot = curplot() if not plot:", "be nested, e.g [[p1, p2], [p3, p4]] Returns: grid_plot: the current :class:`GridPlot <bokeh.objects.GridPlot>`", "\"\"\" 'shows' the current plot, by auto-raising the window or tab displaying the", "keyword parameters. Examples: >>> scatter([1,2,3],[4,5,6], fill_color=\"red\") >>> scatter(\"data1\", \"data2\", source=data_source, ...) \"\"\" ds", "if p is None: return None grid = [obj for obj in p.renderers", "**browser** argument allows specifying which browser to display in, e.g. \"safari\", \"firefox\", \"opera\",", "title, } if os.path.isfile(filename): print(\"Session output file '%s' already exists, will be overwritten.\"", "quadratic = _doc_wrap(gf.quadratic) ray = _doc_wrap(gf.ray) rect = _doc_wrap(gf.rect) segment = _doc_wrap(gf.segment) square", "controller = browserlib.get_browser_controller(browser=browser) plot = curplot() if not plot: warnings.warn(\"No current plot to", "plot \"\"\" p = curplot() if p is None: return None grid =", "import ( get_default_color, get_default_alpha, _handle_1d_data_args, _list_attr_splat ) from .resources import Resources from .session", "arrays or iterables (XNAME, YNAME) Two bokeh DataSource/ColumnsRef marker (str, optional): a valid", "The data to plot. 
Can be of several forms: (X, Y) Two 1D", "if _default_session: push() if _default_file and _default_file['autosave']: save() return grid def xaxis(): \"\"\"", "current output_file(...) filename is used if present resources (Resources, optional) : BokehJS resource", "documentation in the standard lib for more details.) new (str, optional) : new", "\"\"\" print(list(sorted(_marker_types.keys()))) _color_fields = set([\"color\", \"fill_color\", \"line_color\"]) _alpha_fields = set([\"alpha\", \"fill_alpha\", \"line_alpha\"]) def", "import print_function from functools import wraps import itertools import time import logging import", "def axis(): \"\"\" Get the current `x` axis object(s) Returns: Returns x-axis object", "global _default_file _default_file = { 'filename' : filename, 'resources' : Resources(mode=mode, root_dir=root_dir, minified=False),", "call curplot()? if not len(_color_fields.intersection(set(kwargs.keys()))): kwargs['color'] = get_default_color() if not len(_alpha_fields.intersection(set(kwargs.keys()))): kwargs['alpha'] =", "splattable list of axis objects on the current plot \"\"\" p = curplot()", ":class:`GridPlot <bokeh.objects.GridPlot>` \"\"\" grid = GridPlot(children=plot_arrangement) if name: grid._id = name # Walk", "types.\" % markertype) return _marker_types[markertype](*args, **kwargs) def gridplot(plot_arrangement, name=None): \"\"\" Generate a plot", "config from flask request\") return doc except (ImportError, RuntimeError, AttributeError): return _default_document def", "'filename' : filename, 'resources' : Resources(mode=mode, root_dir=root_dir, minified=False), 'autosave' : autosave, 'title' :", "output_server(docname, session=None, url=\"default\", name=None): \"\"\" Cause plotting commands to automatically persist plots to", "subplots into a grid. Args: plot_arrangement (list[:class:`Plot <bokeh.objects.Plot>`]) : plots to arrange in", "True) If **autosave** is True, then every time plot() or one of the", "and output_file(...) 
was never called, nothing saved\") return if not resources: warnings.warn(\"save() called", "= _handle_1d_data_args(args, datasource=ds) kwargs[\"source\"] = datasource markertype = kwargs.get(\"marker\", \"circle\") # TODO: How", "\"\"\" global _default_file _default_file = { 'filename' : filename, 'resources' : Resources(mode=mode, root_dir=root_dir,", "from . import load_notebook load_notebook() global _default_notebook _default_notebook = True def output_file(filename, title=\"Bokeh", "= datasource markertype = kwargs.get(\"marker\", \"circle\") # TODO: How to handle this? Just", "the current document, and is equivalent to curdoc().%s(...)\" % func.__name__ return wrapper annular_wedge", "% markertype) return _marker_types[markertype](*args, **kwargs) def gridplot(plot_arrangement, name=None): \"\"\" Generate a plot that", "can be nested, e.g [[p1, p2], [p3, p4]] Returns: grid_plot: the current :class:`GridPlot", "the server URL as the name Additional keyword arguments like **username**, **userapikey**, and", "opens a new tab. If **new** is 'window', then opens a new window.", "plot that arranges several subplots into a grid. Args: plot_arrangement (list[:class:`Plot <bokeh.objects.Plot>`]) :", ":ref:`userguide_line_properties` and :ref:`userguide_fill_properties` are also accepted as keyword parameters. Examples: >>> scatter([1,2,3],[4,5,6], fill_color=\"red\")", "if session or url or name: if docname is None: docname = \"IPython", "(default: \"tab\") For file-based output, opens or raises the browser window showing the", "or url or name: if docname is None: docname = \"IPython Session at", "isinstance(obj, Grid) and obj.dimension==1] return _list_attr_splat(grid) def grid(): \"\"\" Get the current :class:`grid", "of valid marker types.\" % markertype) return _marker_types[markertype](*args, **kwargs) def gridplot(plot_arrangement, name=None): \"\"\"", "output_file(...) 
filename is used if present resources (Resources, optional) : BokehJS resource config", "_doc_wrap(gf.quadratic) ray = _doc_wrap(gf.ray) rect = _doc_wrap(gf.rect) segment = _doc_wrap(gf.segment) square = _doc_wrap(gf.square)", "''' Return the current document. Returns: doc : the current default document object.", "Calling this function will replaces any existing default Server session \"\"\" output_server(docname, session=Cloud())", ".palettes import brewer from .plotting_helpers import ( get_default_color, get_default_alpha, _handle_1d_data_args, _list_attr_splat ) from", "image_url = _doc_wrap(gf.image_url) inverted_triangle = _doc_wrap(gf.inverted_triangle) line = _doc_wrap(gf.line) multi_line = _doc_wrap(gf.multi_line) oval", "the current :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns grid object or splattable list of", "(str, optional): a valid marker_type, defaults to \"circle\" color (color value, optional): shorthand", "current default document object. ''' try: \"\"\"This is used when we need to", "Cause plotting commands to automatically persist plots to the Bokeh cloud server. Args:", ".objects import Axis, ColumnDataSource, Glyph, Grid, GridPlot, Legend from .palettes import brewer from", "use session.DEFAULT_SERVER_URL name (str, optional) : if name is None, use the server", "Server session \"\"\" global _default_session if url == \"default\": url = DEFAULT_SERVER_URL if", "\"o+\": circle_cross, } def markers(): \"\"\" Prints a list of valid marker types", "supplied and output_file(...) was never called, nothing saved\") return if not curplot(): warnings.warn(\"No", "_doc_wrap(gf.patch) patches = _doc_wrap(gf.patches) quad = _doc_wrap(gf.quad) quadratic = _doc_wrap(gf.quadratic) ray = _doc_wrap(gf.ray)", "optional) : BokehJS resource config to use if `resources` is None, the current", "grid. 
Args: plot_arrangement (list[:class:`Plot <bokeh.objects.Plot>`]) : plots to arrange in a grid name", "Generally, this should be called at the beginning of an interactive session or", "root_url=url) session = _default_session session.use_doc(docname) session.load_document(curdoc()) def output_cloud(docname): \"\"\" Cause plotting commands to", "plotting. All subsequent plotting operations will affect the new figure. This function accepts", "object. Returns: plot : the current default plot (or None) ''' return curdoc().curplot()", "(bool, optional) : whether hold should be turned on or off (default: True)", "= [obj for obj in p.renderers if isinstance(obj, Legend)] return _list_attr_splat(legends) def xgrid():", "= _doc_wrap(gf.inverted_triangle) line = _doc_wrap(gf.line) multi_line = _doc_wrap(gf.multi_line) oval = _doc_wrap(gf.oval) patch =", "'%s'. Use markers() to see a list of valid marker types.\" % markertype)", "= _default_file['filename'] if resources is None and _default_file: resources = _default_file['resources'] if not", "called, this causes the file to be saved. If it is False, then", "from flask request\") return doc except (ImportError, RuntimeError, AttributeError): return _default_document def curplot():", "session=session, name=name) else: from . import load_notebook load_notebook() global _default_notebook _default_notebook = True", "current :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns grid object or splattable list of grid", "image = _doc_wrap(gf.image) image_rgba = _doc_wrap(gf.image_rgba) image_url = _doc_wrap(gf.image_url) inverted_triangle = _doc_wrap(gf.inverted_triangle) line", "a new window. 
\"\"\" filename = _default_file['filename'] if _default_file else None session =", "@wraps(func) def wrapper(*args, **kwargs): retval = func(curdoc(), *args, **kwargs) if cursession() and curdoc()._autostore:", "<bokeh.objects.Grid>` object(s) Returns: Returns legend object or splattable list of legend objects on", "curdoc().add(grid) curdoc()._current_plot = grid # TODO (bev) don't use private attrs if _default_session:", "is used if present resources (Resources, optional) : BokehJS resource config to use", "if there is one. Returns: session : the current default session object (or", "subsequent plotting operations will affect the new figure. This function accepts all plot", "`y` :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns y-grid object or splattable list of y-grid", "want to use the global module level document \"\"\" from flask import request", "current output_server(...) session is used if present document (Document, optional) : BokehJS document", "_default_file['filename'] if _default_file else None session = cursession() notebook = _default_notebook # Map", "Use markers() to see a list of valid marker types.\" % markertype) return", "don't use private attrs if _default_session: push() if _default_file and _default_file['autosave']: save() return", "session: import IPython.core.displaypub as displaypub push(session=session) snippet = autoload_server(plot, cursession()) displaypub.publish_display_data('bokeh', {'text/html': snippet})", "auto-raising the window or tab displaying the current plot (for file/server output modes)", "document. If a filename is supplied, or output_file(...) has been called, this will", "to save document under (default: None) if `filename` is None, the current output_file(...)", "None def curdoc(): ''' Return the current document. Returns: doc : the current", "or clear the plot hold status on the current document. 
This is a", "you don't want to use the global module level document \"\"\" from flask", ": autosave, 'title' : title, } if os.path.isfile(filename): print(\"Session output file '%s' already", "marker type '%s'. Use markers() to see a list of valid marker types.\"", "if docname is None: docname = \"IPython Session at %s\" % time.ctime() output_server(docname,", "name of document to push on Bokeh server An existing documents with the", "the server, within a request context. (Applets do this for example) in this", "for this plot .. note:: `plot_arrangement` can be nested, e.g [[p1, p2], [p3,", "keyword arguments like **username**, **userapikey**, and **base_url** can also be supplied. Returns: None", "is supplied, or output_file(...) has been called, this will save the plot to", "session = _default_session session.use_doc(docname) session.load_document(curdoc()) def output_cloud(docname): \"\"\" Cause plotting commands to automatically", "__future__ import print_function from functools import wraps import itertools import time import logging", "legends = [obj for obj in p.renderers if isinstance(obj, Legend)] return _list_attr_splat(legends) def", "be overwritten. .. note:: Generally, this should be called at the beginning of", "url=url, session=session, name=name) else: from . import load_notebook load_notebook() global _default_notebook _default_notebook =", "to see a list of valid marker types.\" % markertype) return _marker_types[markertype](*args, **kwargs)", "specifying which browser to display in, e.g. \"safari\", \"firefox\", \"opera\", \"windows-default\". (See the", "All subsequent plotting operations will affect the new figure. This function accepts all", "to the various static files should be computed. .. 
note:: Generally, this should", "1}[new] controller = browserlib.get_browser_controller(browser=browser) plot = curplot() if not plot: warnings.warn(\"No current plot", "axis objects Returns: Returns axis object or splattable list of axis objects on", "\"\"\" Get the current `x` axis object(s) Returns: Returns x-axis object or splattable", "= _doc_wrap(gf.x) _marker_types = { \"asterisk\": asterisk, \"circle\": circle, \"circle_cross\": circle_cross, \"circle_x\": circle_x,", "the window or tab displaying the current plot (for file/server output modes) or", "(see http://bokeh.pydata.org/index.html)\") return if notebook and session: import IPython.core.displaypub as displaypub push(session=session) snippet", "circle, \"ox\": circle_x, \"o+\": circle_cross, } def markers(): \"\"\" Prints a list of", "_handle_1d_data_args, _list_attr_splat ) from .resources import Resources from .session import Cloud, DEFAULT_SERVER_URL, Session", "should be turned on or off (default: True) Returns: None ''' curdoc().hold(value) def", "session (Session, optional) : An explicit session to use (default: None) If session", "the given x and y items. Args: *args : The data to plot.", "y-grid object or splattable list of y-grid objects on the current plot \"\"\"", "if isinstance(obj, Axis) and obj.dimension==0] return _list_attr_splat(axis) def yaxis(): \"\"\" Get the current", "get_default_alpha, _handle_1d_data_args, _list_attr_splat ) from .resources import Resources from .session import Cloud, DEFAULT_SERVER_URL,", "<reponame>csaid/bokeh from __future__ import print_function from functools import wraps import itertools import time", "Plot\", autosave=True, mode=\"inline\", root_dir=None): \"\"\" Outputs to a static HTML file. .. note::", "save document under (default: None) if `sessiokn` is None, the current output_server(...) 
session", "Axis) and obj.dimension==0] return _list_attr_splat(axis) def yaxis(): \"\"\" Get the current `y` axis", "name=None): if session or url or name: if docname is None: docname =", "...) \"\"\" ds = kwargs.get(\"source\", None) names, datasource = _handle_1d_data_args(args, datasource=ds) kwargs[\"source\"] =", "figure for plotting. All subsequent plotting operations will affect the new figure. This", "for the current document. Args: session (Sesion, optional) : filename to save document", "the current plot, by auto-raising the window or tab displaying the current plot", "return retval wrapper.__doc__ += \"\\nThis is a convenience function that acts on the", "_doc_wrap(gf.asterisk) bezier = _doc_wrap(gf.bezier) circle = _doc_wrap(gf.circle) circle_cross = _doc_wrap(gf.circle_cross) circle_x = _doc_wrap(gf.circle_x)", "if not curplot(): warnings.warn(\"No current plot to save. Use renderer functions (circle, rect,", "see a list of valid marker types.\" % markertype) return _marker_types[markertype](*args, **kwargs) def", "Axis) and obj.dimension==1] return _list_attr_splat(axis) def axis(): \"\"\" Get the current `x` axis", "specified to indicate the base directory from which the path to the various", "image_rgba = _doc_wrap(gf.image_rgba) image_url = _doc_wrap(gf.image_url) inverted_triangle = _doc_wrap(gf.inverted_triangle) line = _doc_wrap(gf.line) multi_line", "called at the beginning of an interactive session or the top of a", "return _list_attr_splat(legends) def xgrid(): \"\"\" Get the current `x` :class:`grid <bokeh.objects.Grid>` object(s) Returns:", "name: grid._id = name # Walk the plot_arrangement and remove them from the", "argument new_param = {'tab': 2, 'window': 1}[new] controller = browserlib.get_browser_controller(browser=browser) plot = curplot()", "current plot to save. Use renderer functions (circle, rect, etc.) 
to create a", "the current :class:`legend <bokeh.objects.Legend>` object(s) Returns: Returns legend object or splattable list of", "shorthand to set both fill and line color All the :ref:`userguide_line_properties` and :ref:`userguide_fill_properties`", "as displaypub push(session=session) snippet = autoload_server(plot, cursession()) displaypub.publish_display_data('bokeh', {'text/html': snippet}) elif notebook: import", "\"circle_cross\": circle_cross, \"circle_x\": circle_x, \"cross\": cross, \"diamond\": diamond, \"diamond_cross\": diamond_cross, \"inverted_triangle\": inverted_triangle, \"square\":", "Return the current session, if there is one. Returns: session : the current", "to the given filename. Args: filename (str, optional) : filename to save document", "static HTML file. .. note:: This file will be overwritten each time show()", "current plot \"\"\" return _list_attr_splat(xaxis() + yaxis()) def legend(): \"\"\" Get the current", "if name is None: name = url if not session: if not _default_session:", "to a static HTML file. .. note:: This file will be overwritten each", "return None axis = [obj for obj in p.renderers if isinstance(obj, Axis) and", "_default_file and _default_file['autosave']: save() return retval wrapper.__doc__ += \"\\nThis is a convenience function", "None axis = [obj for obj in p.renderers if isinstance(obj, Axis) and obj.dimension==1]", "y-axis object or splattable list of y-axis objects on the current plot \"\"\"", "legend(): \"\"\" Get the current :class:`legend <bokeh.objects.Legend>` object(s) Returns: Returns legend object or", "use the global module level document \"\"\" from flask import request doc =", "= None def curdoc(): ''' Return the current document. 
Returns: doc : the", "= _doc_wrap(gf.image_url) inverted_triangle = _doc_wrap(gf.inverted_triangle) line = _doc_wrap(gf.line) multi_line = _doc_wrap(gf.multi_line) oval =", "current plot \"\"\" p = curplot() if p is None: return None legends", "grid = [obj for obj in p.renderers if isinstance(obj, Grid) and obj.dimension==1] return", "opens or raises the browser window showing the current output file. If **new**", "value, optional): shorthand to set both fill and line color All the :ref:`userguide_line_properties`", "to \"circle\" color (color value, optional): shorthand to set both fill and line", "\"\"\" ds = kwargs.get(\"source\", None) names, datasource = _handle_1d_data_args(args, datasource=ds) kwargs[\"source\"] = datasource", "*args : The data to plot. Can be of several forms: (X, Y)", "= url if not session: if not _default_session: _default_session = Session(name=name, root_url=url) session", "[[p1, p2], [p3, p4]] Returns: grid_plot: the current :class:`GridPlot <bokeh.objects.GridPlot>` \"\"\" grid =", "of grid objects on the current plot \"\"\" return _list_attr_splat(xgrid() + ygrid()) def", "to call the plotting API from within the server, within a request context.", "documents with the same name will be overwritten. .. note:: Generally, this should", "to a Bokeh server. Can use explicitly provided Session for persistence, or the", "the current output_server(...) session is used if present document (Document, optional) : BokehJS", "value (bool, optional) : whether hold should be turned on or off (default:", "import notebook_div, file_html, autoload_server from .objects import Axis, ColumnDataSource, Glyph, Grid, GridPlot, Legend", "overwritten. session (Session, optional) : An explicit session to use (default: None) If", "Just call curplot()? 
if not len(_color_fields.intersection(set(kwargs.keys()))): kwargs['color'] = get_default_color() if not len(_alpha_fields.intersection(set(kwargs.keys()))): kwargs['alpha']", "_doc_wrap(gf.annular_wedge) annulus = _doc_wrap(gf.annulus) arc = _doc_wrap(gf.arc) asterisk = _doc_wrap(gf.asterisk) bezier = _doc_wrap(gf.bezier)", "use private attrs if _default_session: push() if _default_file and _default_file['autosave']: save() return grid", "(default: \"inline\") **mode** can be 'inline', 'cdn', 'relative(-dev)' or 'absolute(-dev)'. In the 'relative(-dev)'", "legend objects on the current plot \"\"\" p = curplot() if p is", "plot = curplot() if not plot: warnings.warn(\"No current plot to show. Use renderer", "if notebook and session: import IPython.core.displaypub as displaypub push(session=session) snippet = autoload_server(plot, cursession())", "default session object (or None) ''' return _default_session def hold(value=True): ''' Set or", "filename. Args: filename (str, optional) : filename to save document under (default: None)", "keyword parameters. Returns: None ''' curdoc().figure(**kwargs) def output_server(docname, session=None, url=\"default\", name=None): \"\"\" Cause", "output file '%s' already exists, will be overwritten.\" % filename) def show(browser=None, new=\"tab\",", ":class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns y-grid object or splattable list of y-grid objects", "or the top of a script. .. note:: Calling this function will replaces", "hold should be turned on or off (default: True) Returns: None ''' curdoc().hold(value)", "to curdoc().%s(...)\" % func.__name__ return wrapper annular_wedge = _doc_wrap(gf.annular_wedge) annulus = _doc_wrap(gf.annulus) arc", "to arrange in a grid name (str) : name for this plot ..", "''' curdoc().figure(**kwargs) def output_server(docname, session=None, url=\"default\", name=None): \"\"\" Cause plotting commands to automatically", "'cdn', 'relative(-dev)' or 'absolute(-dev)'. 
In the 'relative(-dev)' case, **root_dir** can be specified to", "current default plot (or None) ''' return curdoc().curplot() def cursession(): ''' Return the", "push() if url: controller.open(url, new=new_param) else: controller.open(session.object_link(curdoc()._plotcontext)) elif filename: save(filename) controller.open(\"file://\" + os.path.abspath(filename),", "optional) : filename to save document under (default: None) if `sessiokn` is None,", ">>> scatter(\"data1\", \"data2\", source=data_source, ...) \"\"\" ds = kwargs.get(\"source\", None) names, datasource =", "be overwritten.\" % filename) def show(browser=None, new=\"tab\", url=None): \"\"\" 'shows' the current plot,", "Additional keyword arguments like **username**, **userapikey**, and **base_url** can also be supplied. Returns:", "1D arrays or iterables (XNAME, YNAME) Two bokeh DataSource/ColumnsRef marker (str, optional): a", "list of x-axis objects on the current plot \"\"\" return _list_attr_splat(xaxis() + yaxis())", "root_dir=root_dir, minified=False), 'autosave' : autosave, 'title' : title, } if os.path.isfile(filename): print(\"Session output", "(Applets do this for example) in this case you still want the API", ": if name is None, use the server URL as the name Additional", "current :class:`legend <bokeh.objects.Legend>` object(s) Returns: Returns legend object or splattable list of legend", "to push if `document` is None, the current default document is pushed Returns:", "autosave (bool, optional) : whether to automatically save (default: True) If **autosave** is", "the **browser** argument allows specifying which browser to display in, e.g. \"safari\", \"firefox\",", "\"circle_x\": circle_x, \"cross\": cross, \"diamond\": diamond, \"diamond_cross\": diamond_cross, \"inverted_triangle\": inverted_triangle, \"square\": square, \"square_x\":", "with (default: None) For systems that support it, the **browser** argument allows specifying", "\"windows-default\". 
(See the webbrowser module documentation in the standard lib for more details.)", "same name will be overwritten. .. note:: Generally, this should be called at", "this plot .. note:: `plot_arrangement` can be nested, e.g [[p1, p2], [p3, p4]]", "invoked. Args: autosave (bool, optional) : whether to automatically save (default: True) If", "if not session: session = cursession() if not document: document = curdoc() if", "'resources' : Resources(mode=mode, root_dir=root_dir, minified=False), 'autosave' : autosave, 'title' : title, } if", "the plot hold status on the current document. This is a convenience function", ": The data to plot. Can be of several forms: (X, Y) Two", "list of y-axis objects on the current plot \"\"\" p = curplot() if", "be called at the beginning of an interactive session or the top of", "None and _default_file: filename = _default_file['filename'] if resources is None and _default_file: resources", "plot \"\"\" p = curplot() if p is None: return None legends =", "objects on the current plot \"\"\" p = curplot() if p is None:", "on or off (default: True) Returns: None ''' curdoc().hold(value) def figure(**kwargs): ''' Activate", "**mode** can be 'inline', 'cdn', 'relative(-dev)' or 'absolute(-dev)'. In the 'relative(-dev)' case, **root_dir**", "top of a script. \"\"\" global _default_file _default_file = { 'filename' : filename,", "_default_notebook = True def output_file(filename, title=\"Bokeh Plot\", autosave=True, mode=\"inline\", root_dir=None): \"\"\" Outputs to", "displaypub.publish_display_data('bokeh', {'text/html': notebook_div(plot)}) elif session: push() if url: controller.open(url, new=new_param) else: controller.open(session.object_link(curdoc()._plotcontext)) elif", "as the name Additional keyword arguments like **username**, **userapikey**, and **base_url** can also", "note:: Calling this function will replaces any existing default Server session \"\"\" global", "is one. 
Returns: session : the current default session object (or None) '''", "from .session import Cloud, DEFAULT_SERVER_URL, Session logger = logging.getLogger(__name__) _default_document = Document() _default_session", ". import browserlib from . import _glyph_functions as gf from .document import Document", "opens a new window. \"\"\" filename = _default_file['filename'] if _default_file else None session", "Grid) and obj.dimension==0] return _list_attr_splat(grid) def ygrid(): \"\"\" Get the current `y` :class:`grid", "an interactive session or the top of a script. \"\"\" global _default_file _default_file", "visual functions is called, this causes the file to be saved. If it", ": filename to save document under (default: None) if `sessiokn` is None, the", "will affect the new figure. This function accepts all plot style keyword parameters.", "filename = _default_file['filename'] if _default_file else None session = cursession() notebook = _default_notebook", "markers() to see a list of valid marker types.\" % markertype) return _marker_types[markertype](*args,", "the same name will be overwritten. session (Session, optional) : An explicit session", "or output_file(...) has been called, this will save the plot to the given", "displaypub.publish_display_data('bokeh', {'text/html': snippet}) elif notebook: import IPython.core.displaypub as displaypub displaypub.publish_display_data('bokeh', {'text/html': notebook_div(plot)}) elif", "be of several forms: (X, Y) Two 1D arrays or iterables (XNAME, YNAME)", "status on the current document. This is a convenience function that acts on", "if not resources: warnings.warn(\"save() called but no resources was supplied and output_file(...) 
was", "from flask import request doc = request.bokeh_server_document logger.debug(\"returning config from flask request\") return", "Document from .embed import notebook_div, file_html, autoload_server from .objects import Axis, ColumnDataSource, Glyph,", ": whether hold should be turned on or off (default: True) Returns: None", "source=data_source, ...) \"\"\" ds = kwargs.get(\"source\", None) names, datasource = _handle_1d_data_args(args, datasource=ds) kwargs[\"source\"]", "**userapikey**, and **base_url** can also be supplied. Returns: None .. note:: Generally, this", "this function will replaces any existing default Server session \"\"\" output_server(docname, session=Cloud()) def", "called, this will save the plot to the given filename. Args: filename (str,", "object or splattable list of grid objects on the current plot \"\"\" return", "Returns: session : the current default session object (or None) ''' return _default_session", "from .document import Document from .embed import notebook_div, file_html, autoload_server from .objects import", "kwargs.get(\"marker\", \"circle\") # TODO: How to handle this? Just call curplot()? if not", "name is None, use the server URL as the name Additional keyword arguments", "default Server session \"\"\" output_server(docname, session=Cloud()) def output_notebook(url=None, docname=None, session=None, name=None): if session", "the current output_file(...) 
filename is used if present resources (Resources, optional) : BokehJS", "the current :class:`GridPlot <bokeh.objects.GridPlot>` \"\"\" grid = GridPlot(children=plot_arrangement) if name: grid._id = name", "{ 'filename' : filename, 'resources' : Resources(mode=mode, root_dir=root_dir, minified=False), 'autosave' : autosave, 'title'", "set([\"alpha\", \"fill_alpha\", \"line_alpha\"]) def scatter(*args, **kwargs): \"\"\" Creates a scatter plot of the", "splattable list of grid objects on the current plot \"\"\" return _list_attr_splat(xgrid() +", "None, the current output_server(...) session is used if present document (Document, optional) :", "# TODO (bev) don't use private attrs if _default_session: push() if _default_file and", "if os.path.isfile(filename): print(\"Session output file '%s' already exists, will be overwritten.\" % filename)", "If **new** is 'tab', then opens a new tab. If **new** is 'window',", "or raises the browser window showing the current output file. If **new** is", "\"square_cross\": square_cross, \"triangle\": triangle, \"x\": x, \"*\": asterisk, \"+\": cross, \"o\": circle, \"ox\":", "kwargs.get(\"source\", None) names, datasource = _handle_1d_data_args(args, datasource=ds) kwargs[\"source\"] = datasource markertype = kwargs.get(\"marker\",", "**autosave** is True, then every time plot() or one of the other visual", "square_x = _doc_wrap(gf.square_x) text = _doc_wrap(gf.text) triangle = _doc_wrap(gf.triangle) wedge = _doc_wrap(gf.wedge) x", "displaypub push(session=session) snippet = autoload_server(plot, cursession()) displaypub.publish_display_data('bokeh', {'text/html': snippet}) elif notebook: import IPython.core.displaypub", "Get the current :class:`legend <bokeh.objects.Legend>` object(s) Returns: Returns legend object or splattable list", "TODO: How to handle this? Just call curplot()? if not len(_color_fields.intersection(set(kwargs.keys()))): kwargs['color'] =", "curdoc().hold(...) 
Args: value (bool, optional) : whether hold should be turned on or", "current session, if there is one. Returns: session : the current default session", "= name # Walk the plot_arrangement and remove them from the plotcontext, #", "server. Args: docname (str) : name of document to push on Bokeh server", "func.__name__ return wrapper annular_wedge = _doc_wrap(gf.annular_wedge) annulus = _doc_wrap(gf.annulus) arc = _doc_wrap(gf.arc) asterisk", ". import _glyph_functions as gf from .document import Document from .embed import notebook_div,", "(str) : name for this plot .. note:: `plot_arrangement` can be nested, e.g", "directory from which the path to the various static files should be computed.", "will replaces any existing default Server session \"\"\" global _default_session if url ==", "we need to call the plotting API from within the server, within a", "BokehJS document to push if `document` is None, the current default document is", "with the data for the current document. If a filename is supplied, or", "window or tab displaying the current plot (for file/server output modes) or displaying", "_default_file: resources = _default_file['resources'] if not filename: warnings.warn(\"save() called but no filename was", "optional) : new file output mode (default: \"tab\") For file-based output, opens or", "the current default plot object. Returns: plot : the current default plot (or", "name # Walk the plot_arrangement and remove them from the plotcontext, # so", "of x-axis objects on the current plot \"\"\" return _list_attr_splat(xaxis() + yaxis()) def", "object (or None) ''' return _default_session def hold(value=True): ''' Set or clear the", "(Document, optional) : BokehJS document to push if `document` is None, the current", "or displaying it in an output cell (IPython notebook). 
Args: browser (str, optional)", "a valid marker_type, defaults to \"circle\" color (color value, optional): shorthand to set", "An existing documents with the same name will be overwritten. .. note:: Generally,", "plot \"\"\" return _list_attr_splat(xaxis() + yaxis()) def legend(): \"\"\" Get the current :class:`legend", "oval = _doc_wrap(gf.oval) patch = _doc_wrap(gf.patch) patches = _doc_wrap(gf.patches) quad = _doc_wrap(gf.quad) quadratic", "\"\"\" if not session: session = cursession() if not document: document = curdoc()", "_doc_wrap(gf.image) image_rgba = _doc_wrap(gf.image_rgba) image_url = _doc_wrap(gf.image_url) inverted_triangle = _doc_wrap(gf.inverted_triangle) line = _doc_wrap(gf.line)", "rect = _doc_wrap(gf.rect) segment = _doc_wrap(gf.segment) square = _doc_wrap(gf.square) square_cross = _doc_wrap(gf.square_cross) square_x", "p.renderers if isinstance(obj, Grid) and obj.dimension==0] return _list_attr_splat(grid) def ygrid(): \"\"\" Get the", "import Document from .embed import notebook_div, file_html, autoload_server from .objects import Axis, ColumnDataSource,", "and output_file(...) was never called, nothing saved\") return if not curplot(): warnings.warn(\"No current", "for obj in p.renderers if isinstance(obj, Grid) and obj.dimension==0] return _list_attr_splat(grid) def ygrid():", "docname=None, session=None, name=None): if session or url or name: if docname is None:", "display in, e.g. \"safari\", \"firefox\", \"opera\", \"windows-default\". (See the webbrowser module documentation in", "'absolute(-dev)'. In the 'relative(-dev)' case, **root_dir** can be specified to indicate the base", "showing the current output file. 
If **new** is 'tab', then opens a new", "on the current plot \"\"\" p = curplot() if p is None: return", "case you still want the API to work but you don't want to", "level document \"\"\" from flask import request doc = request.bokeh_server_document logger.debug(\"returning config from", "Can be of several forms: (X, Y) Two 1D arrays or iterables (XNAME,", "import wraps import itertools import time import logging import os import uuid import", "set(subplots)) curdoc().add(grid) curdoc()._current_plot = grid # TODO (bev) don't use private attrs if", "parameters. Examples: >>> scatter([1,2,3],[4,5,6], fill_color=\"red\") >>> scatter(\"data1\", \"data2\", source=data_source, ...) \"\"\" ds =", "the file with the data for the current document. If a filename is", "Grid, GridPlot, Legend from .palettes import brewer from .plotting_helpers import ( get_default_color, get_default_alpha,", "<bokeh.objects.Grid>` object(s) Returns: Returns y-grid object or splattable list of y-grid objects on", "script. .. note:: Calling this function will replaces any existing default Server session", "nothing pushed\") def _doc_wrap(func): @wraps(func) def wrapper(*args, **kwargs): retval = func(curdoc(), *args, **kwargs)", "url=\"default\", name=None): \"\"\" Cause plotting commands to automatically persist plots to a Bokeh", "to display in, e.g. \"safari\", \"firefox\", \"opera\", \"windows-default\". (See the webbrowser module documentation", "current plot (see http://bokeh.pydata.org/index.html)\") return html = file_html(curdoc(), resources, _default_file['title']) with open(filename, \"w\")", "(default: None) if `filename` is None, the current output_file(...) filename is used if", "remove them from the plotcontext, # so they don't show up twice subplots", "current document. Returns: doc : the current default document object. ''' try: \"\"\"This", "filename is supplied, or output_file(...) 
has been called, this will save the plot", "optional) : BokehJS document to push if `document` is None, the current default", "API to work but you don't want to use the global module level", "document = curdoc() if session: return session.store_document(curdoc()) else: warnings.warn(\"push() called but no session", "documents with the same name will be overwritten. session (Session, optional) : An", "current default resource config is used Returns: None \"\"\" if filename is None", "push() if _default_file and _default_file['autosave']: save() return grid def xaxis(): \"\"\" Get the", "p.renderers if isinstance(obj, Axis) and obj.dimension==0] return _list_attr_splat(axis) def yaxis(): \"\"\" Get the", "(Session, optional) : An explicit session to use (default: None) If session is", "and line color All the :ref:`userguide_line_properties` and :ref:`userguide_fill_properties` are also accepted as keyword", "_color_fields = set([\"color\", \"fill_color\", \"line_color\"]) _alpha_fields = set([\"alpha\", \"fill_alpha\", \"line_alpha\"]) def scatter(*args, **kwargs):", "not _default_session: _default_session = Session(name=name, root_url=url) session = _default_session session.use_doc(docname) session.load_document(curdoc()) def output_cloud(docname):", "if isinstance(obj, Grid) and obj.dimension==0] return _list_attr_splat(grid) def ygrid(): \"\"\" Get the current", "to work but you don't want to use the global module level document", "return _list_attr_splat(grid) def ygrid(): \"\"\" Get the current `y` :class:`grid <bokeh.objects.Grid>` object(s) Returns:", "brewer from .plotting_helpers import ( get_default_color, get_default_alpha, _handle_1d_data_args, _list_attr_splat ) from .resources import", "**root_dir** can be specified to indicate the base directory from which the path", "file/server output modes) or displaying it in an output cell (IPython notebook). Args:", "for plotting. All subsequent plotting operations will affect the new figure. 
This function", "objects on the current plot \"\"\" return _list_attr_splat(xaxis() + yaxis()) def legend(): \"\"\"", "None) names, datasource = _handle_1d_data_args(args, datasource=ds) kwargs[\"source\"] = datasource markertype = kwargs.get(\"marker\", \"circle\")", "no session was supplied and output_server(...) was never called, nothing pushed\") def _doc_wrap(func):", "If **autosave** is True, then every time plot() or one of the other", "obj in p.renderers if isinstance(obj, Grid) and obj.dimension==1] return _list_attr_splat(grid) def grid(): \"\"\"", "use the default session url (str, optianal) : URL of the Bokeh server", "default Server session \"\"\" global _default_session if url == \"default\": url = DEFAULT_SERVER_URL", ".. note:: `plot_arrangement` can be nested, e.g [[p1, p2], [p3, p4]] Returns: grid_plot:", "If it is False, then the file is only saved upon calling show().", "with open(filename, \"w\") as f: f.write(html) def push(session=None, document=None): \"\"\" Updates the server", "elif filename: save(filename) controller.open(\"file://\" + os.path.abspath(filename), new=new_param) def save(filename=None, resources=None): \"\"\" Updates the", "session = cursession() if not document: document = curdoc() if session: return session.store_document(curdoc())", "indicate the base directory from which the path to the various static files", "name is None: name = url if not session: if not _default_session: _default_session", "if not document: document = curdoc() if session: return session.store_document(curdoc()) else: warnings.warn(\"push() called", "Can use explicitly provided Session for persistence, or the default session. Args: docname", "URL of the Bokeh server (default: \"default\") if url is \"default\" use session.DEFAULT_SERVER_URL", "False, then the file is only saved upon calling show(). 
mode (str, optional)", "_doc_wrap(gf.circle_x) cross = _doc_wrap(gf.cross) diamond = _doc_wrap(gf.diamond) diamond_cross = _doc_wrap(gf.diamond_cross) image = _doc_wrap(gf.image)", "current plot \"\"\" return _list_attr_splat(xgrid() + ygrid()) def load_object(obj): \"\"\"updates object from the", "inverted_triangle = _doc_wrap(gf.inverted_triangle) line = _doc_wrap(gf.line) multi_line = _doc_wrap(gf.multi_line) oval = _doc_wrap(gf.oval) patch", ": URL of the Bokeh server (default: \"default\") if url is \"default\" use", "else: controller.open(session.object_link(curdoc()._plotcontext)) elif filename: save(filename) controller.open(\"file://\" + os.path.abspath(filename), new=new_param) def save(filename=None, resources=None): \"\"\"", "None axis = [obj for obj in p.renderers if isinstance(obj, Axis) and obj.dimension==0]", "YNAME) Two bokeh DataSource/ColumnsRef marker (str, optional): a valid marker_type, defaults to \"circle\"", "filename is None and _default_file: filename = _default_file['filename'] if resources is None and", "within the server, within a request context. (Applets do this for example) in", "curdoc().get_context().children = list(set(curdoc().get_context().children) - set(subplots)) curdoc().add(grid) curdoc()._current_plot = grid # TODO (bev) don't", "return _list_attr_splat(axis) def axis(): \"\"\" Get the current `x` axis object(s) Returns: Returns", "HTML file. .. note:: This file will be overwritten each time show() or", "modes) or displaying it in an output cell (IPython notebook). Args: browser (str,", "default plot object. Returns: plot : the current default plot (or None) '''", "current default plot object. Returns: plot : the current default plot (or None)", "used Returns: None \"\"\" if filename is None and _default_file: filename = _default_file['filename']", "is called, this causes the file to be saved. 
If it is False,", "do this for example) in this case you still want the API to", "was never called, nothing saved\") return if not resources: warnings.warn(\"save() called but no", "= _doc_wrap(gf.circle_cross) circle_x = _doc_wrap(gf.circle_x) cross = _doc_wrap(gf.cross) diamond = _doc_wrap(gf.diamond) diamond_cross =", "asterisk, \"circle\": circle, \"circle_cross\": circle_cross, \"circle_x\": circle_x, \"cross\": cross, \"diamond\": diamond, \"diamond_cross\": diamond_cross,", "be overwritten each time show() or save() is invoked. Args: autosave (bool, optional)", "_default_file else None session = cursession() notebook = _default_notebook # Map our string", "static files should be computed. .. note:: Generally, this should be called at", "if isinstance(obj, Grid) and obj.dimension==1] return _list_attr_splat(grid) def grid(): \"\"\" Get the current", "plot hold status on the current document. This is a convenience function that", "names, datasource = _handle_1d_data_args(args, datasource=ds) kwargs[\"source\"] = datasource markertype = kwargs.get(\"marker\", \"circle\") #", "\"opera\", \"windows-default\". (See the webbrowser module documentation in the standard lib for more", "the API to work but you don't want to use the global module", "_default_session session.use_doc(docname) session.load_document(curdoc()) def output_cloud(docname): \"\"\" Cause plotting commands to automatically persist plots", "current document. If a filename is supplied, or output_file(...) has been called, this", "handle this? Just call curplot()? if not len(_color_fields.intersection(set(kwargs.keys()))): kwargs['color'] = get_default_color() if not", "curplot() if p is None: return None legends = [obj for obj in", "\"\"\" Outputs to a static HTML file. .. note:: This file will be", "\"\"\" Updates the file with the data for the current document. If a", "is equivalent to curdoc().hold(...) 
Args: value (bool, optional) : whether hold should be", "allows specifying which browser to display in, e.g. \"safari\", \"firefox\", \"opera\", \"windows-default\". (See", "_list_attr_splat(axis) def yaxis(): \"\"\" Get the current `y` axis object(s) Returns: Returns y-axis", "displaypub displaypub.publish_display_data('bokeh', {'text/html': notebook_div(plot)}) elif session: push() if url: controller.open(url, new=new_param) else: controller.open(session.object_link(curdoc()._plotcontext))", "or name: if docname is None: docname = \"IPython Session at %s\" %", "interactive session or the top of a script. \"\"\" global _default_file _default_file =", "file. If **new** is 'tab', then opens a new tab. If **new** is", "curdoc(): ''' Return the current document. Returns: doc : the current default document", "is None, the current default resource config is used Returns: None \"\"\" if", "to use (default: None) If session is None, use the default session url", "text = _doc_wrap(gf.text) triangle = _doc_wrap(gf.triangle) wedge = _doc_wrap(gf.wedge) x = _doc_wrap(gf.x) _marker_types", "= set([\"color\", \"fill_color\", \"line_color\"]) _alpha_fields = set([\"alpha\", \"fill_alpha\", \"line_alpha\"]) def scatter(*args, **kwargs): \"\"\"", "name=None): \"\"\" Generate a plot that arranges several subplots into a grid. 
Args:", "\"tab\") For file-based output, opens or raises the browser window showing the current", "% filename) def show(browser=None, new=\"tab\", url=None): \"\"\" 'shows' the current plot, by auto-raising", "time plot() or one of the other visual functions is called, this causes", "the current plot \"\"\" return _list_attr_splat(xgrid() + ygrid()) def load_object(obj): \"\"\"updates object from", "= set([\"alpha\", \"fill_alpha\", \"line_alpha\"]) def scatter(*args, **kwargs): \"\"\" Creates a scatter plot of", "Returns axis object or splattable list of axis objects on the current plot", "y-axis objects on the current plot \"\"\" p = curplot() if p is", "default document object. ''' try: \"\"\"This is used when we need to call", "session=None, url=\"default\", name=None): \"\"\" Cause plotting commands to automatically persist plots to a", "= _doc_wrap(gf.image_rgba) image_url = _doc_wrap(gf.image_url) inverted_triangle = _doc_wrap(gf.inverted_triangle) line = _doc_wrap(gf.line) multi_line =", ".session import Cloud, DEFAULT_SERVER_URL, Session logger = logging.getLogger(__name__) _default_document = Document() _default_session =", "to automatically save (default: True) If **autosave** is True, then every time plot()", "a plot that arranges several subplots into a grid. Args: plot_arrangement (list[:class:`Plot <bokeh.objects.Plot>`])", "file. .. note:: This file will be overwritten each time show() or save()", "which browser to display in, e.g. \"safari\", \"firefox\", \"opera\", \"windows-default\". (See the webbrowser", ":class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns grid object or splattable list of grid objects", "\"diamond_cross\": diamond_cross, \"inverted_triangle\": inverted_triangle, \"square\": square, \"square_x\": square_x, \"square_cross\": square_cross, \"triangle\": triangle, \"x\":", "resources=None): \"\"\" Updates the file with the data for the current document. 
If", "in p.renderers if isinstance(obj, Axis) and obj.dimension==0] return _list_attr_splat(axis) def yaxis(): \"\"\" Get", "\"\"\" grid = GridPlot(children=plot_arrangement) if name: grid._id = name # Walk the plot_arrangement", "doc except (ImportError, RuntimeError, AttributeError): return _default_document def curplot(): ''' Return the current", "os.path.abspath(filename), new=new_param) def save(filename=None, resources=None): \"\"\" Updates the file with the data for", "[obj for obj in p.renderers if isinstance(obj, Grid) and obj.dimension==1] return _list_attr_splat(grid) def", "notebook_div, file_html, autoload_server from .objects import Axis, ColumnDataSource, Glyph, Grid, GridPlot, Legend from", "document to push on Bokeh server An existing documents with the same name", "plot of the given x and y items. Args: *args : The data", "\"cross\": cross, \"diamond\": diamond, \"diamond_cross\": diamond_cross, \"inverted_triangle\": inverted_triangle, \"square\": square, \"square_x\": square_x, \"square_cross\":", "if resources is None and _default_file: resources = _default_file['resources'] if not filename: warnings.warn(\"save()", "Cause plotting commands to automatically persist plots to a Bokeh server. Can use", "filename was supplied and output_file(...) was never called, nothing saved\") return if not", "circle_cross, \"circle_x\": circle_x, \"cross\": cross, \"diamond\": diamond, \"diamond_cross\": diamond_cross, \"inverted_triangle\": inverted_triangle, \"square\": square,", "resources was supplied and output_file(...) 
was never called, nothing saved\") return if not", "a list of valid marker types for scatter() Returns: None \"\"\" print(list(sorted(_marker_types.keys()))) _color_fields", "_doc_wrap(gf.diamond_cross) image = _doc_wrap(gf.image) image_rgba = _doc_wrap(gf.image_rgba) image_url = _doc_wrap(gf.image_url) inverted_triangle = _doc_wrap(gf.inverted_triangle)", "or off (default: True) Returns: None ''' curdoc().hold(value) def figure(**kwargs): ''' Activate a", "doc = request.bokeh_server_document logger.debug(\"returning config from flask request\") return doc except (ImportError, RuntimeError,", "xaxis(): \"\"\" Get the current axis objects Returns: Returns axis object or splattable", "\"\"\" filename = _default_file['filename'] if _default_file else None session = cursession() notebook =", "All the :ref:`userguide_line_properties` and :ref:`userguide_fill_properties` are also accepted as keyword parameters. Examples: >>>", "explicitly provided Session for persistence, or the default session. Args: docname (str) :", "axis = [obj for obj in p.renderers if isinstance(obj, Axis) and obj.dimension==1] return", "\"\"\" p = curplot() if p is None: return None grid = [obj", "webbrowser module documentation in the standard lib for more details.) new (str, optional)", "_default_file['title']) with open(filename, \"w\") as f: f.write(html) def push(session=None, document=None): \"\"\" Updates the", "= [obj for obj in p.renderers if isinstance(obj, Axis) and obj.dimension==1] return _list_attr_splat(axis)", "existing default Server session \"\"\" output_server(docname, session=Cloud()) def output_notebook(url=None, docname=None, session=None, name=None): if", "grid objects on the current plot \"\"\" return _list_attr_splat(xgrid() + ygrid()) def load_object(obj):", "a new tab. If **new** is 'window', then opens a new window. 
\"\"\"", "_default_file _default_file = { 'filename' : filename, 'resources' : Resources(mode=mode, root_dir=root_dir, minified=False), 'autosave'", "= _doc_wrap(gf.quad) quadratic = _doc_wrap(gf.quadratic) ray = _doc_wrap(gf.ray) rect = _doc_wrap(gf.rect) segment =", "\"square_x\": square_x, \"square_cross\": square_cross, \"triangle\": triangle, \"x\": x, \"*\": asterisk, \"+\": cross, \"o\":", "None grid = [obj for obj in p.renderers if isinstance(obj, Grid) and obj.dimension==1]", "def curdoc(): ''' Return the current document. Returns: doc : the current default", "logger.debug(\"returning config from flask request\") return doc except (ImportError, RuntimeError, AttributeError): return _default_document", "filename is used if present resources (Resources, optional) : BokehJS resource config to", "grid._id = name # Walk the plot_arrangement and remove them from the plotcontext,", "\"square\": square, \"square_x\": square_x, \"square_cross\": square_cross, \"triangle\": triangle, \"x\": x, \"*\": asterisk, \"+\":", "valid marker types.\" % markertype) return _marker_types[markertype](*args, **kwargs) def gridplot(plot_arrangement, name=None): \"\"\" Generate", "of axis objects on the current plot \"\"\" p = curplot() if p", "save(filename=None, resources=None): \"\"\" Updates the file with the data for the current document.", "the standard lib for more details.) new (str, optional) : new file output", "and obj.dimension==1] return _list_attr_splat(axis) def axis(): \"\"\" Get the current `x` axis object(s)", "the current default session object (or None) ''' return _default_session def hold(value=True): '''", "isinstance(obj, Grid) and obj.dimension==0] return _list_attr_splat(grid) def ygrid(): \"\"\" Get the current `y`", "is invoked. 
Args: autosave (bool, optional) : whether to automatically save (default: True)", ": how to inlude BokehJS (default: \"inline\") **mode** can be 'inline', 'cdn', 'relative(-dev)'", "not len(_color_fields.intersection(set(kwargs.keys()))): kwargs['color'] = get_default_color() if not len(_alpha_fields.intersection(set(kwargs.keys()))): kwargs['alpha'] = get_default_alpha() if markertype", "calling show(). mode (str, optional) : how to inlude BokehJS (default: \"inline\") **mode**", "= cursession() notebook = _default_notebook # Map our string argument to the webbrowser.open", "= _doc_wrap(gf.line) multi_line = _doc_wrap(gf.multi_line) oval = _doc_wrap(gf.oval) patch = _doc_wrap(gf.patch) patches =", "diamond = _doc_wrap(gf.diamond) diamond_cross = _doc_wrap(gf.diamond_cross) image = _doc_wrap(gf.image) image_rgba = _doc_wrap(gf.image_rgba) image_url", "and obj.dimension==1] return _list_attr_splat(grid) def grid(): \"\"\" Get the current :class:`grid <bokeh.objects.Grid>` object(s)", "not resources: warnings.warn(\"save() called but no resources was supplied and output_file(...) was never", "logging import os import uuid import warnings from . import browserlib from .", "current `x` :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns legend object or splattable list of", "path to the various static files should be computed. .. note:: Generally, this", "controller.open(url, new=new_param) else: controller.open(session.object_link(curdoc()._plotcontext)) elif filename: save(filename) controller.open(\"file://\" + os.path.abspath(filename), new=new_param) def save(filename=None,", "server. Can use explicitly provided Session for persistence, or the default session. 
Args:", "a current plot (see http://bokeh.pydata.org/index.html)\") return if notebook and session: import IPython.core.displaypub as", "_default_file['resources'] if not filename: warnings.warn(\"save() called but no filename was supplied and output_file(...)", "from .plotting_helpers import ( get_default_color, get_default_alpha, _handle_1d_data_args, _list_attr_splat ) from .resources import Resources", "Bokeh server An existing documents with the same name will be overwritten. session", "and session: import IPython.core.displaypub as displaypub push(session=session) snippet = autoload_server(plot, cursession()) displaypub.publish_display_data('bokeh', {'text/html':", "push if `document` is None, the current default document is pushed Returns: None", "load_notebook() global _default_notebook _default_notebook = True def output_file(filename, title=\"Bokeh Plot\", autosave=True, mode=\"inline\", root_dir=None):", "(bev) don't use private attrs if _default_session: push() if _default_file and _default_file['autosave']: save()", "browser to display in, e.g. \"safari\", \"firefox\", \"opera\", \"windows-default\". (See the webbrowser module", "is None, the current output_file(...) filename is used if present resources (Resources, optional)", "plot_arrangement (list[:class:`Plot <bokeh.objects.Plot>`]) : plots to arrange in a grid name (str) :", "also accepted as keyword parameters. Examples: >>> scatter([1,2,3],[4,5,6], fill_color=\"red\") >>> scatter(\"data1\", \"data2\", source=data_source,", "an output cell (IPython notebook). Args: browser (str, optional) : browser to show", "resources is None and _default_file: resources = _default_file['resources'] if not filename: warnings.warn(\"save() called", "= _doc_wrap(gf.annular_wedge) annulus = _doc_wrap(gf.annulus) arc = _doc_wrap(gf.arc) asterisk = _doc_wrap(gf.asterisk) bezier =", "plot to the given filename. 
Args: filename (str, optional) : filename to save", "http://bokeh.pydata.org/index.html)\") return html = file_html(curdoc(), resources, _default_file['title']) with open(filename, \"w\") as f: f.write(html)", "open(filename, \"w\") as f: f.write(html) def push(session=None, document=None): \"\"\" Updates the server with", "**new** is 'tab', then opens a new tab. If **new** is 'window', then", "yaxis(): \"\"\" Get the current `y` axis object(s) Returns: Returns y-axis object or", "set([\"color\", \"fill_color\", \"line_color\"]) _alpha_fields = set([\"alpha\", \"fill_alpha\", \"line_alpha\"]) def scatter(*args, **kwargs): \"\"\" Creates", "xgrid(): \"\"\" Get the current `x` :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns legend object", "under (default: None) if `filename` is None, the current output_file(...) filename is used", "use (default: None) If session is None, use the default session url (str,", "gf from .document import Document from .embed import notebook_div, file_html, autoload_server from .objects", "''' Set or clear the plot hold status on the current document. This", "new figure for plotting. All subsequent plotting operations will affect the new figure.", "_default_file = { 'filename' : filename, 'resources' : Resources(mode=mode, root_dir=root_dir, minified=False), 'autosave' :", "def output_file(filename, title=\"Bokeh Plot\", autosave=True, mode=\"inline\", root_dir=None): \"\"\" Outputs to a static HTML", "is used when we need to call the plotting API from within the", "x-axis object or splattable list of x-axis objects on the current plot \"\"\"", "not session: session = cursession() if not document: document = curdoc() if session:", "standard lib for more details.) 
new (str, optional) : new file output mode", "push() if _default_file and _default_file['autosave']: save() return retval wrapper.__doc__ += \"\\nThis is a", "\"fill_color\", \"line_color\"]) _alpha_fields = set([\"alpha\", \"fill_alpha\", \"line_alpha\"]) def scatter(*args, **kwargs): \"\"\" Creates a", "{'text/html': snippet}) elif notebook: import IPython.core.displaypub as displaypub displaypub.publish_display_data('bokeh', {'text/html': notebook_div(plot)}) elif session:", "\"firefox\", \"opera\", \"windows-default\". (See the webbrowser module documentation in the standard lib for", "a grid. Args: plot_arrangement (list[:class:`Plot <bokeh.objects.Plot>`]) : plots to arrange in a grid", ".resources import Resources from .session import Cloud, DEFAULT_SERVER_URL, Session logger = logging.getLogger(__name__) _default_document", "This file will be overwritten each time show() or save() is invoked. Args:", "this? Just call curplot()? if not len(_color_fields.intersection(set(kwargs.keys()))): kwargs['color'] = get_default_color() if not len(_alpha_fields.intersection(set(kwargs.keys()))):", "= _doc_wrap(gf.oval) patch = _doc_wrap(gf.patch) patches = _doc_wrap(gf.patches) quad = _doc_wrap(gf.quad) quadratic =", "warnings.warn(\"save() called but no resources was supplied and output_file(...) was never called, nothing", "\"circle\" color (color value, optional): shorthand to set both fill and line color", "request.bokeh_server_document logger.debug(\"returning config from flask request\") return doc except (ImportError, RuntimeError, AttributeError): return", "splattable list of y-grid objects on the current plot \"\"\" p = curplot()", "Returns: doc : the current default document object. ''' try: \"\"\"This is used", "should be called at the beginning of an interactive session or the top", "Document() _default_session = None _default_file = None _default_notebook = None def curdoc(): '''", "save. Use renderer functions (circle, rect, etc.) 
to create a current plot (see", "Returns: None ''' curdoc().hold(value) def figure(**kwargs): ''' Activate a new figure for plotting.", "If **new** is 'window', then opens a new window. \"\"\" filename = _default_file['filename']", "object(s) Returns: Returns y-grid object or splattable list of y-grid objects on the", "(or None) ''' return _default_session def hold(value=True): ''' Set or clear the plot", "None ''' curdoc().figure(**kwargs) def output_server(docname, session=None, url=\"default\", name=None): \"\"\" Cause plotting commands to", "commands to automatically persist plots to a Bokeh server. Can use explicitly provided", "= _default_session session.use_doc(docname) session.load_document(curdoc()) def output_cloud(docname): \"\"\" Cause plotting commands to automatically persist", "\"+\": cross, \"o\": circle, \"ox\": circle_x, \"o+\": circle_cross, } def markers(): \"\"\" Prints", "function accepts all plot style keyword parameters. Returns: None ''' curdoc().figure(**kwargs) def output_server(docname,", "elif session: push() if url: controller.open(url, new=new_param) else: controller.open(session.object_link(curdoc()._plotcontext)) elif filename: save(filename) controller.open(\"file://\"", "None \"\"\" if filename is None and _default_file: filename = _default_file['filename'] if resources", "\"\"\" from flask import request doc = request.bokeh_server_document logger.debug(\"returning config from flask request\")", "when we need to call the plotting API from within the server, within", "minified=False), 'autosave' : autosave, 'title' : title, } if os.path.isfile(filename): print(\"Session output file", "%s\" % time.ctime() output_server(docname, url=url, session=session, name=name) else: from . 
import load_notebook load_notebook()", "kwargs[\"source\"] = datasource markertype = kwargs.get(\"marker\", \"circle\") # TODO: How to handle this?", "the current default document is pushed Returns: None \"\"\" if not session: session", "supplied, or output_file(...) has been called, this will save the plot to the", "for the current document. If a filename is supplied, or output_file(...) has been", "file is only saved upon calling show(). mode (str, optional) : how to", "still want the API to work but you don't want to use the", "provided Session for persistence, or the default session. Args: docname (str) : name", "of y-axis objects on the current plot \"\"\" p = curplot() if p", "in p.renderers if isinstance(obj, Grid) and obj.dimension==1] return _list_attr_splat(grid) def grid(): \"\"\" Get", "return _default_document def curplot(): ''' Return the current default plot object. Returns: plot", "notebook and session: import IPython.core.displaypub as displaypub push(session=session) snippet = autoload_server(plot, cursession()) displaypub.publish_display_data('bokeh',", "of a script. .. note:: Calling this function will replaces any existing default", "was supplied and output_server(...) was never called, nothing pushed\") def _doc_wrap(func): @wraps(func) def", "Updates the file with the data for the current document. If a filename", "as f: f.write(html) def push(session=None, document=None): \"\"\" Updates the server with the data", "a filename is supplied, or output_file(...) has been called, this will save the", "obj in p.renderers if isinstance(obj, Axis) and obj.dimension==0] return _list_attr_splat(axis) def yaxis(): \"\"\"", "p = curplot() if p is None: return None axis = [obj for", "= curplot() if p is None: return None legends = [obj for obj", "\"*\": asterisk, \"+\": cross, \"o\": circle, \"ox\": circle_x, \"o+\": circle_cross, } def markers():", "None .. 
note:: Generally, this should be called at the beginning of an", "output modes) or displaying it in an output cell (IPython notebook). Args: browser", "default plot (or None) ''' return curdoc().curplot() def cursession(): ''' Return the current", "plot (for file/server output modes) or displaying it in an output cell (IPython", "file-based output, opens or raises the browser window showing the current output file.", "the name Additional keyword arguments like **username**, **userapikey**, and **base_url** can also be", "is None: return None legends = [obj for obj in p.renderers if isinstance(obj,", "data to plot. Can be of several forms: (X, Y) Two 1D arrays", "len(_alpha_fields.intersection(set(kwargs.keys()))): kwargs['alpha'] = get_default_alpha() if markertype not in _marker_types: raise ValueError(\"Invalid marker type", "object or splattable list of legend objects on the current plot \"\"\" p", "def ygrid(): \"\"\" Get the current `y` :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns y-grid", "Get the current axis objects Returns: Returns axis object or splattable list of", "http://bokeh.pydata.org/index.html)\") return if notebook and session: import IPython.core.displaypub as displaypub push(session=session) snippet =", "_marker_types: raise ValueError(\"Invalid marker type '%s'. 
Use markers() to see a list of", "Returns: Returns legend object or splattable list of legend objects on the current", "request doc = request.bokeh_server_document logger.debug(\"returning config from flask request\") return doc except (ImportError,", ": BokehJS document to push if `document` is None, the current default document", "the current `x` :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns legend object or splattable list", "_list_attr_splat(xgrid() + ygrid()) def load_object(obj): \"\"\"updates object from the server \"\"\" cursession().load_object(obj, curdoc())", "\"ox\": circle_x, \"o+\": circle_cross, } def markers(): \"\"\" Prints a list of valid", "Returns: Returns y-grid object or splattable list of y-grid objects on the current", "import os import uuid import warnings from . import browserlib from . import", "arguments like **username**, **userapikey**, and **base_url** can also be supplied. Returns: None ..", "show with (default: None) For systems that support it, the **browser** argument allows", "<bokeh.objects.Plot>`]) : plots to arrange in a grid name (str) : name for", "_marker_types = { \"asterisk\": asterisk, \"circle\": circle, \"circle_cross\": circle_cross, \"circle_x\": circle_x, \"cross\": cross,", "url=None): \"\"\" 'shows' the current plot, by auto-raising the window or tab displaying", "affect the new figure. This function accepts all plot style keyword parameters. Returns:", "etc.) to create a current plot (see http://bokeh.pydata.org/index.html)\") return html = file_html(curdoc(), resources,", "DataSource/ColumnsRef marker (str, optional): a valid marker_type, defaults to \"circle\" color (color value,", "_default_file = None _default_notebook = None def curdoc(): ''' Return the current document.", "that acts on the current document, and is equivalent to curdoc().hold(...) 
Args: value", "= get_default_color() if not len(_alpha_fields.intersection(set(kwargs.keys()))): kwargs['alpha'] = get_default_alpha() if markertype not in _marker_types:", "\"\"\" Get the current axis objects Returns: Returns axis object or splattable list", "current plot \"\"\" p = curplot() if p is None: return None grid", "and output_server(...) was never called, nothing pushed\") def _doc_wrap(func): @wraps(func) def wrapper(*args, **kwargs):", "''' try: \"\"\"This is used when we need to call the plotting API", "warnings.warn(\"No current plot to show. Use renderer functions (circle, rect, etc.) to create", "= _doc_wrap(gf.diamond_cross) image = _doc_wrap(gf.image) image_rgba = _doc_wrap(gf.image_rgba) image_url = _doc_wrap(gf.image_url) inverted_triangle =", ") from .resources import Resources from .session import Cloud, DEFAULT_SERVER_URL, Session logger =", "the Bokeh server (default: \"default\") if url is \"default\" use session.DEFAULT_SERVER_URL name (str,", "if url == \"default\": url = DEFAULT_SERVER_URL if name is None: name =", "DEFAULT_SERVER_URL, Session logger = logging.getLogger(__name__) _default_document = Document() _default_session = None _default_file =", "= itertools.chain.from_iterable(plot_arrangement) curdoc().get_context().children = list(set(curdoc().get_context().children) - set(subplots)) curdoc().add(grid) curdoc()._current_plot = grid # TODO", "functions (circle, rect, etc.) to create a current plot (see http://bokeh.pydata.org/index.html)\") return html", "= _doc_wrap(gf.image) image_rgba = _doc_wrap(gf.image_rgba) image_url = _doc_wrap(gf.image_url) inverted_triangle = _doc_wrap(gf.inverted_triangle) line =", "''' Return the current session, if there is one. 
Returns: session : the", "= _doc_wrap(gf.square_cross) square_x = _doc_wrap(gf.square_x) text = _doc_wrap(gf.text) triangle = _doc_wrap(gf.triangle) wedge =", "and _default_file: resources = _default_file['resources'] if not filename: warnings.warn(\"save() called but no filename", "server with the data for the current document. Args: session (Sesion, optional) :", "never called, nothing saved\") return if not resources: warnings.warn(\"save() called but no resources", "\"fill_alpha\", \"line_alpha\"]) def scatter(*args, **kwargs): \"\"\" Creates a scatter plot of the given", "url is \"default\" use session.DEFAULT_SERVER_URL name (str, optional) : if name is None,", "the current plot \"\"\" p = curplot() if p is None: return None", "the 'relative(-dev)' case, **root_dir** can be specified to indicate the base directory from", "equivalent to curdoc().hold(...) Args: value (bool, optional) : whether hold should be turned", "Outputs to a static HTML file. .. note:: This file will be overwritten", "optianal) : URL of the Bokeh server (default: \"default\") if url is \"default\"", "None, the current default resource config is used Returns: None \"\"\" if filename", "if present document (Document, optional) : BokehJS document to push if `document` is", "plot_arrangement and remove them from the plotcontext, # so they don't show up", "module level document \"\"\" from flask import request doc = request.bokeh_server_document logger.debug(\"returning config", "def figure(**kwargs): ''' Activate a new figure for plotting. All subsequent plotting operations", "then the file is only saved upon calling show(). mode (str, optional) :", "if present resources (Resources, optional) : BokehJS resource config to use if `resources`", "the webbrowser.open argument new_param = {'tab': 2, 'window': 1}[new] controller = browserlib.get_browser_controller(browser=browser) plot", "file will be overwritten each time show() or save() is invoked. 
Args: autosave", "session or the top of a script. .. note:: Calling this function will", "Glyph, Grid, GridPlot, Legend from .palettes import brewer from .plotting_helpers import ( get_default_color,", "then every time plot() or one of the other visual functions is called,", "file with the data for the current document. If a filename is supplied,", "to automatically persist plots to the Bokeh cloud server. Args: docname (str) :", "name = url if not session: if not _default_session: _default_session = Session(name=name, root_url=url)", "_default_document def curplot(): ''' Return the current default plot object. Returns: plot :", "details.) new (str, optional) : new file output mode (default: \"tab\") For file-based", "within a request context. (Applets do this for example) in this case you", "url = DEFAULT_SERVER_URL if name is None: name = url if not session:", "is pushed Returns: None \"\"\" if not session: session = cursession() if not", "to inlude BokehJS (default: \"inline\") **mode** can be 'inline', 'cdn', 'relative(-dev)' or 'absolute(-dev)'.", "len(_color_fields.intersection(set(kwargs.keys()))): kwargs['color'] = get_default_color() if not len(_alpha_fields.intersection(set(kwargs.keys()))): kwargs['alpha'] = get_default_alpha() if markertype not", "IPython.core.displaypub as displaypub displaypub.publish_display_data('bokeh', {'text/html': notebook_div(plot)}) elif session: push() if url: controller.open(url, new=new_param)", "notebook = _default_notebook # Map our string argument to the webbrowser.open argument new_param", "to the webbrowser.open argument new_param = {'tab': 2, 'window': 1}[new] controller = browserlib.get_browser_controller(browser=browser)", "current plot to show. Use renderer functions (circle, rect, etc.) 
to create a", "scatter() Returns: None \"\"\" print(list(sorted(_marker_types.keys()))) _color_fields = set([\"color\", \"fill_color\", \"line_color\"]) _alpha_fields = set([\"alpha\",", "if url is \"default\" use session.DEFAULT_SERVER_URL name (str, optional) : if name is", "name will be overwritten. .. note:: Generally, this should be called at the", "document under (default: None) if `filename` is None, the current output_file(...) filename is", "\"\"\" global _default_session if url == \"default\": url = DEFAULT_SERVER_URL if name is", "to show with (default: None) For systems that support it, the **browser** argument", "\"IPython Session at %s\" % time.ctime() output_server(docname, url=url, session=session, name=name) else: from .", "not curplot(): warnings.warn(\"No current plot to save. Use renderer functions (circle, rect, etc.)", "session url (str, optianal) : URL of the Bokeh server (default: \"default\") if", "filename to save document under (default: None) if `filename` is None, the current", "arrange in a grid name (str) : name for this plot .. note::", "but no filename was supplied and output_file(...) was never called, nothing saved\") return", "logging.getLogger(__name__) _default_document = Document() _default_session = None _default_file = None _default_notebook = None", "in a grid name (str) : name for this plot .. note:: `plot_arrangement`", "**new** is 'window', then opens a new window. 
\"\"\" filename = _default_file['filename'] if", "patch = _doc_wrap(gf.patch) patches = _doc_wrap(gf.patches) quad = _doc_wrap(gf.quad) quadratic = _doc_wrap(gf.quadratic) ray", "return _marker_types[markertype](*args, **kwargs) def gridplot(plot_arrangement, name=None): \"\"\" Generate a plot that arranges several", "Axis, ColumnDataSource, Glyph, Grid, GridPlot, Legend from .palettes import brewer from .plotting_helpers import", "''' return curdoc().curplot() def cursession(): ''' Return the current session, if there is", "= _doc_wrap(gf.circle) circle_cross = _doc_wrap(gf.circle_cross) circle_x = _doc_wrap(gf.circle_x) cross = _doc_wrap(gf.cross) diamond =", "is \"default\" use session.DEFAULT_SERVER_URL name (str, optional) : if name is None, use", "that acts on the current document, and is equivalent to curdoc().%s(...)\" % func.__name__", "and _default_file: filename = _default_file['filename'] if resources is None and _default_file: resources =", "\"\"\" Get the current :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns grid object or splattable", "obj in p.renderers if isinstance(obj, Grid) and obj.dimension==0] return _list_attr_splat(grid) def ygrid(): \"\"\"", "existing documents with the same name will be overwritten. .. note:: Generally, this", "pushed\") def _doc_wrap(func): @wraps(func) def wrapper(*args, **kwargs): retval = func(curdoc(), *args, **kwargs) if", "and :ref:`userguide_fill_properties` are also accepted as keyword parameters. Examples: >>> scatter([1,2,3],[4,5,6], fill_color=\"red\") >>>", "an interactive session or the top of a script. .. note:: Calling this", "can be specified to indicate the base directory from which the path to", "\"inline\") **mode** can be 'inline', 'cdn', 'relative(-dev)' or 'absolute(-dev)'. 
In the 'relative(-dev)' case,", "mode (str, optional) : how to inlude BokehJS (default: \"inline\") **mode** can be", "for scatter() Returns: None \"\"\" print(list(sorted(_marker_types.keys()))) _color_fields = set([\"color\", \"fill_color\", \"line_color\"]) _alpha_fields =", "grid name (str) : name for this plot .. note:: `plot_arrangement` can be", "URL as the name Additional keyword arguments like **username**, **userapikey**, and **base_url** can", "object or splattable list of y-grid objects on the current plot \"\"\" p", "= _doc_wrap(gf.patches) quad = _doc_wrap(gf.quad) quadratic = _doc_wrap(gf.quadratic) ray = _doc_wrap(gf.ray) rect =", "RuntimeError, AttributeError): return _default_document def curplot(): ''' Return the current default plot object.", "new=new_param) def save(filename=None, resources=None): \"\"\" Updates the file with the data for the", "axis object(s) Returns: Returns x-axis object or splattable list of x-axis objects on", "= _doc_wrap(gf.segment) square = _doc_wrap(gf.square) square_cross = _doc_wrap(gf.square_cross) square_x = _doc_wrap(gf.square_x) text =", "BokehJS (default: \"inline\") **mode** can be 'inline', 'cdn', 'relative(-dev)' or 'absolute(-dev)'. In the", "_list_attr_splat(grid) def ygrid(): \"\"\" Get the current `y` :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns", "to plot. Can be of several forms: (X, Y) Two 1D arrays or", "the current axis objects Returns: Returns axis object or splattable list of axis", "(bool, optional) : whether to automatically save (default: True) If **autosave** is True,", "that support it, the **browser** argument allows specifying which browser to display in,", "== \"default\": url = DEFAULT_SERVER_URL if name is None: name = url if", "overwritten. .. 
note:: Generally, this should be called at the beginning of an", "present document (Document, optional) : BokehJS document to push if `document` is None,", "= { \"asterisk\": asterisk, \"circle\": circle, \"circle_cross\": circle_cross, \"circle_x\": circle_x, \"cross\": cross, \"diamond\":", "def markers(): \"\"\" Prints a list of valid marker types for scatter() Returns:", "output cell (IPython notebook). Args: browser (str, optional) : browser to show with", "the various static files should be computed. .. note:: Generally, this should be", "optional) : An explicit session to use (default: None) If session is None,", "document, and is equivalent to curdoc().hold(...) Args: value (bool, optional) : whether hold", "x, \"*\": asterisk, \"+\": cross, \"o\": circle, \"ox\": circle_x, \"o+\": circle_cross, } def", "document (Document, optional) : BokehJS document to push if `document` is None, the", "if _default_file and _default_file['autosave']: save() return grid def xaxis(): \"\"\" Get the current", "Legend from .palettes import brewer from .plotting_helpers import ( get_default_color, get_default_alpha, _handle_1d_data_args, _list_attr_splat", "use the server URL as the name Additional keyword arguments like **username**, **userapikey**,", "warnings.warn(\"save() called but no filename was supplied and output_file(...) was never called, nothing", "current document. 
Args: session (Sesion, optional) : filename to save document under (default:", "file_html(curdoc(), resources, _default_file['title']) with open(filename, \"w\") as f: f.write(html) def push(session=None, document=None): \"\"\"", "square_x, \"square_cross\": square_cross, \"triangle\": triangle, \"x\": x, \"*\": asterisk, \"+\": cross, \"o\": circle,", "GridPlot, Legend from .palettes import brewer from .plotting_helpers import ( get_default_color, get_default_alpha, _handle_1d_data_args,", "print(\"Session output file '%s' already exists, will be overwritten.\" % filename) def show(browser=None,", "displaying the current plot (for file/server output modes) or displaying it in an", "nested, e.g [[p1, p2], [p3, p4]] Returns: grid_plot: the current :class:`GridPlot <bokeh.objects.GridPlot>` \"\"\"", "at the beginning of an interactive session or the top of a script.", "_doc_wrap(gf.cross) diamond = _doc_wrap(gf.diamond) diamond_cross = _doc_wrap(gf.diamond_cross) image = _doc_wrap(gf.image) image_rgba = _doc_wrap(gf.image_rgba)", "it, the **browser** argument allows specifying which browser to display in, e.g. \"safari\",", "acts on the current document, and is equivalent to curdoc().hold(...) Args: value (bool,", "Calling this function will replaces any existing default Server session \"\"\" global _default_session", "files should be computed. .. 
note:: Generally, this should be called at the", "(list[:class:`Plot <bokeh.objects.Plot>`]) : plots to arrange in a grid name (str) : name", "\"\"\" Prints a list of valid marker types for scatter() Returns: None \"\"\"", "is None: return None grid = [obj for obj in p.renderers if isinstance(obj,", "= { 'filename' : filename, 'resources' : Resources(mode=mode, root_dir=root_dir, minified=False), 'autosave' : autosave,", "Returns: Returns y-axis object or splattable list of y-axis objects on the current", "convenience function that acts on the current document, and is equivalent to curdoc().%s(...)\"", "plot() or one of the other visual functions is called, this causes the", "also be supplied. Returns: None .. note:: Generally, this should be called at", "= None _default_file = None _default_notebook = None def curdoc(): ''' Return the", "2, 'window': 1}[new] controller = browserlib.get_browser_controller(browser=browser) plot = curplot() if not plot: warnings.warn(\"No", "in the standard lib for more details.) new (str, optional) : new file", "systems that support it, the **browser** argument allows specifying which browser to display", "output, opens or raises the browser window showing the current output file. If", "explicit session to use (default: None) If session is None, use the default", "objects Returns: Returns axis object or splattable list of axis objects on the", "be computed. .. note:: Generally, this should be called at the beginning of", "[obj for obj in p.renderers if isinstance(obj, Axis) and obj.dimension==0] return _list_attr_splat(axis) def", "gridplot(plot_arrangement, name=None): \"\"\" Generate a plot that arranges several subplots into a grid.", "name: if docname is None: docname = \"IPython Session at %s\" % time.ctime()", "session: return session.store_document(curdoc()) else: warnings.warn(\"push() called but no session was supplied and output_server(...)", "**username**, **userapikey**, and **base_url** can also be supplied. 
Returns: None .. note:: Generally,", ": new file output mode (default: \"tab\") For file-based output, opens or raises", "notebook_div(plot)}) elif session: push() if url: controller.open(url, new=new_param) else: controller.open(session.object_link(curdoc()._plotcontext)) elif filename: save(filename)", "be overwritten. session (Session, optional) : An explicit session to use (default: None)", "the current document. Returns: doc : the current default document object. ''' try:", "'tab', then opens a new tab. If **new** is 'window', then opens a", "None \"\"\" if not session: session = cursession() if not document: document =", "script. \"\"\" global _default_file _default_file = { 'filename' : filename, 'resources' : Resources(mode=mode,", "legend object or splattable list of legend objects on the current plot \"\"\"", "current default document is pushed Returns: None \"\"\" if not session: session =", "\"\\nThis is a convenience function that acts on the current document, and is", "data for the current document. If a filename is supplied, or output_file(...) has", "of y-grid objects on the current plot \"\"\" p = curplot() if p", "is None: docname = \"IPython Session at %s\" % time.ctime() output_server(docname, url=url, session=session,", "None and _default_file: resources = _default_file['resources'] if not filename: warnings.warn(\"save() called but no", "Get the current :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns grid object or splattable list", "+= \"\\nThis is a convenience function that acts on the current document, and", "plot style keyword parameters. Returns: None ''' curdoc().figure(**kwargs) def output_server(docname, session=None, url=\"default\", name=None):", "session, if there is one. Returns: session : the current default session object", "get_default_alpha() if markertype not in _marker_types: raise ValueError(\"Invalid marker type '%s'. 
Use markers()", "time.ctime() output_server(docname, url=url, session=session, name=name) else: from . import load_notebook load_notebook() global _default_notebook", "color (color value, optional): shorthand to set both fill and line color All", "notebook). Args: browser (str, optional) : browser to show with (default: None) For", "True) Returns: None ''' curdoc().hold(value) def figure(**kwargs): ''' Activate a new figure for", "if `filename` is None, the current output_file(...) filename is used if present resources", "triangle, \"x\": x, \"*\": asterisk, \"+\": cross, \"o\": circle, \"ox\": circle_x, \"o+\": circle_cross,", "session was supplied and output_server(...) was never called, nothing pushed\") def _doc_wrap(func): @wraps(func)", "asterisk, \"+\": cross, \"o\": circle, \"ox\": circle_x, \"o+\": circle_cross, } def markers(): \"\"\"", "\"\"\" Cause plotting commands to automatically persist plots to the Bokeh cloud server.", "(default: None) For systems that support it, the **browser** argument allows specifying which", "_default_file and _default_file['autosave']: save() return grid def xaxis(): \"\"\" Get the current axis", "`sessiokn` is None, the current output_server(...) session is used if present document (Document,", "If a filename is supplied, or output_file(...) has been called, this will save", "% func.__name__ return wrapper annular_wedge = _doc_wrap(gf.annular_wedge) annulus = _doc_wrap(gf.annulus) arc = _doc_wrap(gf.arc)", "raise ValueError(\"Invalid marker type '%s'. Use markers() to see a list of valid", "saved\") return if not resources: warnings.warn(\"save() called but no resources was supplied and", "function that acts on the current document, and is equivalent to curdoc().hold(...) Args:", "else: from . import load_notebook load_notebook() global _default_notebook _default_notebook = True def output_file(filename,", "interactive session or the top of a script. .. 
note:: Calling this function", "webbrowser.open argument new_param = {'tab': 2, 'window': 1}[new] controller = browserlib.get_browser_controller(browser=browser) plot =", "url == \"default\": url = DEFAULT_SERVER_URL if name is None: name = url", "notebook: import IPython.core.displaypub as displaypub displaypub.publish_display_data('bokeh', {'text/html': notebook_div(plot)}) elif session: push() if url:", "optional) : browser to show with (default: None) For systems that support it,", "then opens a new tab. If **new** is 'window', then opens a new", "the current output file. If **new** is 'tab', then opens a new tab.", "output_file(...) was never called, nothing saved\") return if not curplot(): warnings.warn(\"No current plot", "the plotting API from within the server, within a request context. (Applets do", "% time.ctime() output_server(docname, url=url, session=session, name=name) else: from . import load_notebook load_notebook() global", "circle_cross, } def markers(): \"\"\" Prints a list of valid marker types for", "on the current document, and is equivalent to curdoc().%s(...)\" % func.__name__ return wrapper", "in an output cell (IPython notebook). Args: browser (str, optional) : browser to", "to save document under (default: None) if `sessiokn` is None, the current output_server(...)", "browserlib from . import _glyph_functions as gf from .document import Document from .embed", "= curdoc() if session: return session.store_document(curdoc()) else: warnings.warn(\"push() called but no session was", "'%s' already exists, will be overwritten.\" % filename) def show(browser=None, new=\"tab\", url=None): \"\"\"", "itertools import time import logging import os import uuid import warnings from .", "= DEFAULT_SERVER_URL if name is None: name = url if not session: if", "of an interactive session or the top of a script. 
\"\"\" global _default_file", "*args, **kwargs) if cursession() and curdoc()._autostore: push() if _default_file and _default_file['autosave']: save() return", "scatter([1,2,3],[4,5,6], fill_color=\"red\") >>> scatter(\"data1\", \"data2\", source=data_source, ...) \"\"\" ds = kwargs.get(\"source\", None) names,", "def hold(value=True): ''' Set or clear the plot hold status on the current", "output mode (default: \"tab\") For file-based output, opens or raises the browser window", "operations will affect the new figure. This function accepts all plot style keyword", "supplied and output_server(...) was never called, nothing pushed\") def _doc_wrap(func): @wraps(func) def wrapper(*args,", "e.g. \"safari\", \"firefox\", \"opera\", \"windows-default\". (See the webbrowser module documentation in the standard", "in, e.g. \"safari\", \"firefox\", \"opera\", \"windows-default\". (See the webbrowser module documentation in the", "if _default_file and _default_file['autosave']: save() return retval wrapper.__doc__ += \"\\nThis is a convenience", "is used if present document (Document, optional) : BokehJS document to push if", "name (str, optional) : if name is None, use the server URL as", "multi_line = _doc_wrap(gf.multi_line) oval = _doc_wrap(gf.oval) patch = _doc_wrap(gf.patch) patches = _doc_wrap(gf.patches) quad", "to use if `resources` is None, the current default resource config is used", "_default_session: _default_session = Session(name=name, root_url=url) session = _default_session session.use_doc(docname) session.load_document(curdoc()) def output_cloud(docname): \"\"\"", "If session is None, use the default session url (str, optianal) : URL", "session or url or name: if docname is None: docname = \"IPython Session", "more details.) new (str, optional) : new file output mode (default: \"tab\") For", "which the path to the various static files should be computed. .. 
note::", "resources: warnings.warn(\"save() called but no resources was supplied and output_file(...) was never called,", "`document` is None, the current default document is pushed Returns: None \"\"\" if", "the current default plot (or None) ''' return curdoc().curplot() def cursession(): ''' Return", ".. note:: Generally, this should be called at the beginning of an interactive", "all plot style keyword parameters. Returns: None ''' curdoc().figure(**kwargs) def output_server(docname, session=None, url=\"default\",", "objects on the current plot \"\"\" return _list_attr_splat(xgrid() + ygrid()) def load_object(obj): \"\"\"updates", "os import uuid import warnings from . import browserlib from . import _glyph_functions", "the top of a script. \"\"\" global _default_file _default_file = { 'filename' :", "or splattable list of y-grid objects on the current plot \"\"\" p =", "Examples: >>> scatter([1,2,3],[4,5,6], fill_color=\"red\") >>> scatter(\"data1\", \"data2\", source=data_source, ...) \"\"\" ds = kwargs.get(\"source\",", "upon calling show(). mode (str, optional) : how to inlude BokehJS (default: \"inline\")", "browserlib.get_browser_controller(browser=browser) plot = curplot() if not plot: warnings.warn(\"No current plot to show. Use", "the Bokeh cloud server. Args: docname (str) : name of document to push", "curdoc().hold(value) def figure(**kwargs): ''' Activate a new figure for plotting. All subsequent plotting", "An existing documents with the same name will be overwritten. 
session (Session, optional)", "return _list_attr_splat(axis) def yaxis(): \"\"\" Get the current `y` axis object(s) Returns: Returns", ":class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns legend object or splattable list of legend objects", "current `y` axis object(s) Returns: Returns y-axis object or splattable list of y-axis", "_doc_wrap(gf.oval) patch = _doc_wrap(gf.patch) patches = _doc_wrap(gf.patches) quad = _doc_wrap(gf.quad) quadratic = _doc_wrap(gf.quadratic)", "document object. ''' try: \"\"\"This is used when we need to call the", "_default_notebook = None def curdoc(): ''' Return the current document. Returns: doc :", "note:: Calling this function will replaces any existing default Server session \"\"\" output_server(docname,", "print(list(sorted(_marker_types.keys()))) _color_fields = set([\"color\", \"fill_color\", \"line_color\"]) _alpha_fields = set([\"alpha\", \"fill_alpha\", \"line_alpha\"]) def scatter(*args,", "arc = _doc_wrap(gf.arc) asterisk = _doc_wrap(gf.asterisk) bezier = _doc_wrap(gf.bezier) circle = _doc_wrap(gf.circle) circle_cross", "document under (default: None) if `sessiokn` is None, the current output_server(...) session is", "any existing default Server session \"\"\" global _default_session if url == \"default\": url", "is only saved upon calling show(). mode (str, optional) : how to inlude", "the base directory from which the path to the various static files should", "obj in p.renderers if isinstance(obj, Legend)] return _list_attr_splat(legends) def xgrid(): \"\"\" Get the", "was never called, nothing saved\") return if not curplot(): warnings.warn(\"No current plot to", "is None: name = url if not session: if not _default_session: _default_session =", "current plot (for file/server output modes) or displaying it in an output cell", "= _doc_wrap(gf.square) square_cross = _doc_wrap(gf.square_cross) square_x = _doc_wrap(gf.square_x) text = _doc_wrap(gf.text) triangle =", "output file. 
If **new** is 'tab', then opens a new tab. If **new**", "None, use the default session url (str, optianal) : URL of the Bokeh", "url: controller.open(url, new=new_param) else: controller.open(session.object_link(curdoc()._plotcontext)) elif filename: save(filename) controller.open(\"file://\" + os.path.abspath(filename), new=new_param) def", "name=None): \"\"\" Cause plotting commands to automatically persist plots to a Bokeh server.", "be 'inline', 'cdn', 'relative(-dev)' or 'absolute(-dev)'. In the 'relative(-dev)' case, **root_dir** can be", "items. Args: *args : The data to plot. Can be of several forms:", "renderer functions (circle, rect, etc.) to create a current plot (see http://bokeh.pydata.org/index.html)\") return", "filename: save(filename) controller.open(\"file://\" + os.path.abspath(filename), new=new_param) def save(filename=None, resources=None): \"\"\" Updates the file", "session (Sesion, optional) : filename to save document under (default: None) if `sessiokn`", "line color All the :ref:`userguide_line_properties` and :ref:`userguide_fill_properties` are also accepted as keyword parameters.", "wrapper annular_wedge = _doc_wrap(gf.annular_wedge) annulus = _doc_wrap(gf.annulus) arc = _doc_wrap(gf.arc) asterisk = _doc_wrap(gf.asterisk)", "if `sessiokn` is None, the current output_server(...) 
session is used if present document", "_list_attr_splat(xaxis() + yaxis()) def legend(): \"\"\" Get the current :class:`legend <bokeh.objects.Legend>` object(s) Returns:", "object(s) Returns: Returns x-axis object or splattable list of x-axis objects on the", "<bokeh.objects.Legend>` object(s) Returns: Returns legend object or splattable list of legend objects on", "_doc_wrap(gf.x) _marker_types = { \"asterisk\": asterisk, \"circle\": circle, \"circle_cross\": circle_cross, \"circle_x\": circle_x, \"cross\":", "(default: None) If session is None, use the default session url (str, optianal)", ": the current default plot (or None) ''' return curdoc().curplot() def cursession(): '''", "return wrapper annular_wedge = _doc_wrap(gf.annular_wedge) annulus = _doc_wrap(gf.annulus) arc = _doc_wrap(gf.arc) asterisk =", "= [obj for obj in p.renderers if isinstance(obj, Grid) and obj.dimension==1] return _list_attr_splat(grid)", "Session(name=name, root_url=url) session = _default_session session.use_doc(docname) session.load_document(curdoc()) def output_cloud(docname): \"\"\" Cause plotting commands", "(default: None) if `sessiokn` is None, the current output_server(...) session is used if", "mode=\"inline\", root_dir=None): \"\"\" Outputs to a static HTML file. .. note:: This file", "Get the current `y` axis object(s) Returns: Returns y-axis object or splattable list", ": plots to arrange in a grid name (str) : name for this", "None) ''' return curdoc().curplot() def cursession(): ''' Return the current session, if there", "asterisk = _doc_wrap(gf.asterisk) bezier = _doc_wrap(gf.bezier) circle = _doc_wrap(gf.circle) circle_cross = _doc_wrap(gf.circle_cross) circle_x", "is None, the current output_server(...) 
session is used if present document (Document, optional)", "list(set(curdoc().get_context().children) - set(subplots)) curdoc().add(grid) curdoc()._current_plot = grid # TODO (bev) don't use private", "Returns x-axis object or splattable list of x-axis objects on the current plot", "document is pushed Returns: None \"\"\" if not session: session = cursession() if", "axis object(s) Returns: Returns y-axis object or splattable list of y-axis objects on", "curdoc().curplot() def cursession(): ''' Return the current session, if there is one. Returns:", "name for this plot .. note:: `plot_arrangement` can be nested, e.g [[p1, p2],", "accepted as keyword parameters. Examples: >>> scatter([1,2,3],[4,5,6], fill_color=\"red\") >>> scatter(\"data1\", \"data2\", source=data_source, ...)", "\"default\": url = DEFAULT_SERVER_URL if name is None: name = url if not", "output_cloud(docname): \"\"\" Cause plotting commands to automatically persist plots to the Bokeh cloud", "_doc_wrap(gf.triangle) wedge = _doc_wrap(gf.wedge) x = _doc_wrap(gf.x) _marker_types = { \"asterisk\": asterisk, \"circle\":", "_doc_wrap(func): @wraps(func) def wrapper(*args, **kwargs): retval = func(curdoc(), *args, **kwargs) if cursession() and", "circle, \"circle_cross\": circle_cross, \"circle_x\": circle_x, \"cross\": cross, \"diamond\": diamond, \"diamond_cross\": diamond_cross, \"inverted_triangle\": inverted_triangle,", "_doc_wrap(gf.line) multi_line = _doc_wrap(gf.multi_line) oval = _doc_wrap(gf.oval) patch = _doc_wrap(gf.patch) patches = _doc_wrap(gf.patches)", "= kwargs.get(\"source\", None) names, datasource = _handle_1d_data_args(args, datasource=ds) kwargs[\"source\"] = datasource markertype =", "persist plots to the Bokeh cloud server. Args: docname (str) : name of", "marker types for scatter() Returns: None \"\"\" print(list(sorted(_marker_types.keys()))) _color_fields = set([\"color\", \"fill_color\", \"line_color\"])", "overwritten each time show() or save() is invoked. 
Args: autosave (bool, optional) :", "subplots = itertools.chain.from_iterable(plot_arrangement) curdoc().get_context().children = list(set(curdoc().get_context().children) - set(subplots)) curdoc().add(grid) curdoc()._current_plot = grid #", "'autosave' : autosave, 'title' : title, } if os.path.isfile(filename): print(\"Session output file '%s'", "new window. \"\"\" filename = _default_file['filename'] if _default_file else None session = cursession()", "docname (str) : name of document to push on Bokeh server An existing", "_default_session: push() if _default_file and _default_file['autosave']: save() return grid def xaxis(): \"\"\" Get", "'shows' the current plot, by auto-raising the window or tab displaying the current", "file output mode (default: \"tab\") For file-based output, opens or raises the browser", "Returns: None \"\"\" print(list(sorted(_marker_types.keys()))) _color_fields = set([\"color\", \"fill_color\", \"line_color\"]) _alpha_fields = set([\"alpha\", \"fill_alpha\",", "forms: (X, Y) Two 1D arrays or iterables (XNAME, YNAME) Two bokeh DataSource/ColumnsRef", "def wrapper(*args, **kwargs): retval = func(curdoc(), *args, **kwargs) if cursession() and curdoc()._autostore: push()", "None session = cursession() notebook = _default_notebook # Map our string argument to", "not len(_alpha_fields.intersection(set(kwargs.keys()))): kwargs['alpha'] = get_default_alpha() if markertype not in _marker_types: raise ValueError(\"Invalid marker", "called but no session was supplied and output_server(...) was never called, nothing pushed\")", "Resources from .session import Cloud, DEFAULT_SERVER_URL, Session logger = logging.getLogger(__name__) _default_document = Document()", "of the given x and y items. Args: *args : The data to", "each time show() or save() is invoked. Args: autosave (bool, optional) : whether", "the current `x` axis object(s) Returns: Returns x-axis object or splattable list of", "curplot(): ''' Return the current default plot object. 
Returns: plot : the current", "output_file(filename, title=\"Bokeh Plot\", autosave=True, mode=\"inline\", root_dir=None): \"\"\" Outputs to a static HTML file.", "hold(value=True): ''' Set or clear the plot hold status on the current document.", "e.g [[p1, p2], [p3, p4]] Returns: grid_plot: the current :class:`GridPlot <bokeh.objects.GridPlot>` \"\"\" grid", "or splattable list of axis objects on the current plot \"\"\" p =", "file '%s' already exists, will be overwritten.\" % filename) def show(browser=None, new=\"tab\", url=None):", "given x and y items. Args: *args : The data to plot. Can", ": name for this plot .. note:: `plot_arrangement` can be nested, e.g [[p1,", "server, within a request context. (Applets do this for example) in this case", "''' curdoc().hold(value) def figure(**kwargs): ''' Activate a new figure for plotting. All subsequent", "defaults to \"circle\" color (color value, optional): shorthand to set both fill and", "but no resources was supplied and output_file(...) was never called, nothing saved\") return", "kwargs['alpha'] = get_default_alpha() if markertype not in _marker_types: raise ValueError(\"Invalid marker type '%s'.", "filename to save document under (default: None) if `sessiokn` is None, the current", "current output file. If **new** is 'tab', then opens a new tab. 
If", "[obj for obj in p.renderers if isinstance(obj, Legend)] return _list_attr_splat(legends) def xgrid(): \"\"\"", "Two 1D arrays or iterables (XNAME, YNAME) Two bokeh DataSource/ColumnsRef marker (str, optional):", "turned on or off (default: True) Returns: None ''' curdoc().hold(value) def figure(**kwargs): '''", "_glyph_functions as gf from .document import Document from .embed import notebook_div, file_html, autoload_server", "def yaxis(): \"\"\" Get the current `y` axis object(s) Returns: Returns y-axis object", "import IPython.core.displaypub as displaypub push(session=session) snippet = autoload_server(plot, cursession()) displaypub.publish_display_data('bokeh', {'text/html': snippet}) elif", "it in an output cell (IPython notebook). Args: browser (str, optional) : browser", "html = file_html(curdoc(), resources, _default_file['title']) with open(filename, \"w\") as f: f.write(html) def push(session=None,", "[p3, p4]] Returns: grid_plot: the current :class:`GridPlot <bokeh.objects.GridPlot>` \"\"\" grid = GridPlot(children=plot_arrangement) if", "else: warnings.warn(\"push() called but no session was supplied and output_server(...) was never called,", "current :class:`GridPlot <bokeh.objects.GridPlot>` \"\"\" grid = GridPlot(children=plot_arrangement) if name: grid._id = name #", "object(s) Returns: Returns y-axis object or splattable list of y-axis objects on the", "filename: warnings.warn(\"save() called but no filename was supplied and output_file(...) was never called,", "persistence, or the default session. 
Args: docname (str) : name of document to", "_doc_wrap(gf.square) square_cross = _doc_wrap(gf.square_cross) square_x = _doc_wrap(gf.square_x) text = _doc_wrap(gf.text) triangle = _doc_wrap(gf.triangle)", "`x` axis object(s) Returns: Returns x-axis object or splattable list of x-axis objects", "optional) : whether to automatically save (default: True) If **autosave** is True, then", "= request.bokeh_server_document logger.debug(\"returning config from flask request\") return doc except (ImportError, RuntimeError, AttributeError):", "cursession() notebook = _default_notebook # Map our string argument to the webbrowser.open argument", "Returns: None \"\"\" if not session: session = cursession() if not document: document", "markertype = kwargs.get(\"marker\", \"circle\") # TODO: How to handle this? Just call curplot()?", "Args: docname (str) : name of document to push on Bokeh server An", "_doc_wrap(gf.diamond) diamond_cross = _doc_wrap(gf.diamond_cross) image = _doc_wrap(gf.image) image_rgba = _doc_wrap(gf.image_rgba) image_url = _doc_wrap(gf.image_url)", "doc : the current default document object. 
''' try: \"\"\"This is used when", "isinstance(obj, Axis) and obj.dimension==1] return _list_attr_splat(axis) def axis(): \"\"\" Get the current `x`", "list of valid marker types for scatter() Returns: None \"\"\" print(list(sorted(_marker_types.keys()))) _color_fields =", "Returns: None ''' curdoc().figure(**kwargs) def output_server(docname, session=None, url=\"default\", name=None): \"\"\" Cause plotting commands", "private attrs if _default_session: push() if _default_file and _default_file['autosave']: save() return grid def", "bezier = _doc_wrap(gf.bezier) circle = _doc_wrap(gf.circle) circle_cross = _doc_wrap(gf.circle_cross) circle_x = _doc_wrap(gf.circle_x) cross", "if filename is None and _default_file: filename = _default_file['filename'] if resources is None", "axis = [obj for obj in p.renderers if isinstance(obj, Axis) and obj.dimension==0] return", "commands to automatically persist plots to the Bokeh cloud server. Args: docname (str)", "example) in this case you still want the API to work but you", "def _doc_wrap(func): @wraps(func) def wrapper(*args, **kwargs): retval = func(curdoc(), *args, **kwargs) if cursession()", "if session: return session.store_document(curdoc()) else: warnings.warn(\"push() called but no session was supplied and", "title=\"Bokeh Plot\", autosave=True, mode=\"inline\", root_dir=None): \"\"\" Outputs to a static HTML file. ..", "new file output mode (default: \"tab\") For file-based output, opens or raises the", "saved upon calling show(). mode (str, optional) : how to inlude BokehJS (default:", "as keyword parameters. Examples: >>> scatter([1,2,3],[4,5,6], fill_color=\"red\") >>> scatter(\"data1\", \"data2\", source=data_source, ...) 
\"\"\"", ": filename, 'resources' : Resources(mode=mode, root_dir=root_dir, minified=False), 'autosave' : autosave, 'title' : title,", "one of the other visual functions is called, this causes the file to", "_doc_wrap(gf.segment) square = _doc_wrap(gf.square) square_cross = _doc_wrap(gf.square_cross) square_x = _doc_wrap(gf.square_x) text = _doc_wrap(gf.text)", "curplot() if p is None: return None axis = [obj for obj in", "return _default_session def hold(value=True): ''' Set or clear the plot hold status on", "already exists, will be overwritten.\" % filename) def show(browser=None, new=\"tab\", url=None): \"\"\" 'shows'", "line = _doc_wrap(gf.line) multi_line = _doc_wrap(gf.multi_line) oval = _doc_wrap(gf.oval) patch = _doc_wrap(gf.patch) patches", "**kwargs): \"\"\" Creates a scatter plot of the given x and y items.", "p = curplot() if p is None: return None legends = [obj for", "session: push() if url: controller.open(url, new=new_param) else: controller.open(session.object_link(curdoc()._plotcontext)) elif filename: save(filename) controller.open(\"file://\" +", "if `document` is None, the current default document is pushed Returns: None \"\"\"", "existing default Server session \"\"\" global _default_session if url == \"default\": url =", "cursession() if not document: document = curdoc() if session: return session.store_document(curdoc()) else: warnings.warn(\"push()", "in this case you still want the API to work but you don't", "= _doc_wrap(gf.asterisk) bezier = _doc_wrap(gf.bezier) circle = _doc_wrap(gf.circle) circle_cross = _doc_wrap(gf.circle_cross) circle_x =", "if `resources` is None, the current default resource config is used Returns: None", "lib for more details.) 
new (str, optional) : new file output mode (default:", "{ \"asterisk\": asterisk, \"circle\": circle, \"circle_cross\": circle_cross, \"circle_x\": circle_x, \"cross\": cross, \"diamond\": diamond,", "from functools import wraps import itertools import time import logging import os import", "= Session(name=name, root_url=url) session = _default_session session.use_doc(docname) session.load_document(curdoc()) def output_cloud(docname): \"\"\" Cause plotting", "(See the webbrowser module documentation in the standard lib for more details.) new", "_doc_wrap(gf.inverted_triangle) line = _doc_wrap(gf.line) multi_line = _doc_wrap(gf.multi_line) oval = _doc_wrap(gf.oval) patch = _doc_wrap(gf.patch)", "axis object or splattable list of axis objects on the current plot \"\"\"", "whether to automatically save (default: True) If **autosave** is True, then every time", "as displaypub displaypub.publish_display_data('bokeh', {'text/html': notebook_div(plot)}) elif session: push() if url: controller.open(url, new=new_param) else:", "grid object or splattable list of grid objects on the current plot \"\"\"", "config is used Returns: None \"\"\" if filename is None and _default_file: filename", "name Additional keyword arguments like **username**, **userapikey**, and **base_url** can also be supplied.", "output_file(...) was never called, nothing saved\") return if not resources: warnings.warn(\"save() called but", "function will replaces any existing default Server session \"\"\" global _default_session if url", "retval = func(curdoc(), *args, **kwargs) if cursession() and curdoc()._autostore: push() if _default_file and", "\"circle\": circle, \"circle_cross\": circle_cross, \"circle_x\": circle_x, \"cross\": cross, \"diamond\": diamond, \"diamond_cross\": diamond_cross, \"inverted_triangle\":", "them from the plotcontext, # so they don't show up twice subplots =", "of an interactive session or the top of a script. .. 
note:: Calling", "and remove them from the plotcontext, # so they don't show up twice", "= logging.getLogger(__name__) _default_document = Document() _default_session = None _default_file = None _default_notebook =", "IPython.core.displaypub as displaypub push(session=session) snippet = autoload_server(plot, cursession()) displaypub.publish_display_data('bokeh', {'text/html': snippet}) elif notebook:", "note:: `plot_arrangement` can be nested, e.g [[p1, p2], [p3, p4]] Returns: grid_plot: the", "resource config is used Returns: None \"\"\" if filename is None and _default_file:", "_doc_wrap(gf.circle) circle_cross = _doc_wrap(gf.circle_cross) circle_x = _doc_wrap(gf.circle_x) cross = _doc_wrap(gf.cross) diamond = _doc_wrap(gf.diamond)", ".plotting_helpers import ( get_default_color, get_default_alpha, _handle_1d_data_args, _list_attr_splat ) from .resources import Resources from", "plotting operations will affect the new figure. This function accepts all plot style", "\"o\": circle, \"ox\": circle_x, \"o+\": circle_cross, } def markers(): \"\"\" Prints a list", "a grid name (str) : name for this plot .. note:: `plot_arrangement` can", "iterables (XNAME, YNAME) Two bokeh DataSource/ColumnsRef marker (str, optional): a valid marker_type, defaults", "= GridPlot(children=plot_arrangement) if name: grid._id = name # Walk the plot_arrangement and remove", "isinstance(obj, Axis) and obj.dimension==0] return _list_attr_splat(axis) def yaxis(): \"\"\" Get the current `y`", "acts on the current document, and is equivalent to curdoc().%s(...)\" % func.__name__ return", "(color value, optional): shorthand to set both fill and line color All the", "None) For systems that support it, the **browser** argument allows specifying which browser", "def output_cloud(docname): \"\"\" Cause plotting commands to automatically persist plots to the Bokeh", "p = curplot() if p is None: return None grid = [obj for", "saved. If it is False, then the file is only saved upon calling", "figure. 
This function accepts all plot style keyword parameters. Returns: None ''' curdoc().figure(**kwargs)", "plots to arrange in a grid name (str) : name for this plot", "'window', then opens a new window. \"\"\" filename = _default_file['filename'] if _default_file else", "\"\"\" Get the current `x` :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns legend object or", "browser (str, optional) : browser to show with (default: None) For systems that", "_marker_types[markertype](*args, **kwargs) def gridplot(plot_arrangement, name=None): \"\"\" Generate a plot that arranges several subplots", "will be overwritten. .. note:: Generally, this should be called at the beginning", "= browserlib.get_browser_controller(browser=browser) plot = curplot() if not plot: warnings.warn(\"No current plot to show.", "inlude BokehJS (default: \"inline\") **mode** can be 'inline', 'cdn', 'relative(-dev)' or 'absolute(-dev)'. In", "annular_wedge = _doc_wrap(gf.annular_wedge) annulus = _doc_wrap(gf.annulus) arc = _doc_wrap(gf.arc) asterisk = _doc_wrap(gf.asterisk) bezier", "Session for persistence, or the default session. 
Args: docname (str) : name of", "docname = \"IPython Session at %s\" % time.ctime() output_server(docname, url=url, session=session, name=name) else:", "Returns: grid_plot: the current :class:`GridPlot <bokeh.objects.GridPlot>` \"\"\" grid = GridPlot(children=plot_arrangement) if name: grid._id", "server (default: \"default\") if url is \"default\" use session.DEFAULT_SERVER_URL name (str, optional) :", "function will replaces any existing default Server session \"\"\" output_server(docname, session=Cloud()) def output_notebook(url=None,", "from __future__ import print_function from functools import wraps import itertools import time import", "if isinstance(obj, Axis) and obj.dimension==1] return _list_attr_splat(axis) def axis(): \"\"\" Get the current", "the current `y` :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns y-grid object or splattable list", "= Document() _default_session = None _default_file = None _default_notebook = None def curdoc():", "_default_session = Session(name=name, root_url=url) session = _default_session session.use_doc(docname) session.load_document(curdoc()) def output_cloud(docname): \"\"\" Cause", "= _default_file['filename'] if _default_file else None session = cursession() notebook = _default_notebook #", "'window': 1}[new] controller = browserlib.get_browser_controller(browser=browser) plot = curplot() if not plot: warnings.warn(\"No current", "triangle = _doc_wrap(gf.triangle) wedge = _doc_wrap(gf.wedge) x = _doc_wrap(gf.x) _marker_types = { \"asterisk\":", "with the same name will be overwritten. session (Session, optional) : An explicit", "current `x` axis object(s) Returns: Returns x-axis object or splattable list of x-axis", "import IPython.core.displaypub as displaypub displaypub.publish_display_data('bokeh', {'text/html': notebook_div(plot)}) elif session: push() if url: controller.open(url,", "under (default: None) if `sessiokn` is None, the current output_server(...) 
session is used", "be turned on or off (default: True) Returns: None ''' curdoc().hold(value) def figure(**kwargs):", "if not session: if not _default_session: _default_session = Session(name=name, root_url=url) session = _default_session", "is None and _default_file: filename = _default_file['filename'] if resources is None and _default_file:", "optional): a valid marker_type, defaults to \"circle\" color (color value, optional): shorthand to", "= [obj for obj in p.renderers if isinstance(obj, Axis) and obj.dimension==0] return _list_attr_splat(axis)", "current default session object (or None) ''' return _default_session def hold(value=True): ''' Set", "An explicit session to use (default: None) If session is None, use the", "current plot, by auto-raising the window or tab displaying the current plot (for", "circle_x = _doc_wrap(gf.circle_x) cross = _doc_wrap(gf.cross) diamond = _doc_wrap(gf.diamond) diamond_cross = _doc_wrap(gf.diamond_cross) image", "yaxis()) def legend(): \"\"\" Get the current :class:`legend <bokeh.objects.Legend>` object(s) Returns: Returns legend", "global _default_notebook _default_notebook = True def output_file(filename, title=\"Bokeh Plot\", autosave=True, mode=\"inline\", root_dir=None): \"\"\"", "\"\"\" p = curplot() if p is None: return None legends = [obj", "new tab. If **new** is 'window', then opens a new window. \"\"\" filename", "\"w\") as f: f.write(html) def push(session=None, document=None): \"\"\" Updates the server with the", "autoload_server(plot, cursession()) displaypub.publish_display_data('bokeh', {'text/html': snippet}) elif notebook: import IPython.core.displaypub as displaypub displaypub.publish_display_data('bokeh', {'text/html':", ".. note:: This file will be overwritten each time show() or save() is", "the file to be saved. If it is False, then the file is", "for more details.) 
new (str, optional) : new file output mode (default: \"tab\")", "note:: Generally, this should be called at the beginning of an interactive session", "then opens a new window. \"\"\" filename = _default_file['filename'] if _default_file else None", ".. note:: Calling this function will replaces any existing default Server session \"\"\"", "plot to show. Use renderer functions (circle, rect, etc.) to create a current", "is None: return None axis = [obj for obj in p.renderers if isinstance(obj,", "Args: plot_arrangement (list[:class:`Plot <bokeh.objects.Plot>`]) : plots to arrange in a grid name (str)", "_handle_1d_data_args(args, datasource=ds) kwargs[\"source\"] = datasource markertype = kwargs.get(\"marker\", \"circle\") # TODO: How to", "_doc_wrap(gf.rect) segment = _doc_wrap(gf.segment) square = _doc_wrap(gf.square) square_cross = _doc_wrap(gf.square_cross) square_x = _doc_wrap(gf.square_x)", "_doc_wrap(gf.square_x) text = _doc_wrap(gf.text) triangle = _doc_wrap(gf.triangle) wedge = _doc_wrap(gf.wedge) x = _doc_wrap(gf.x)", "supplied. Returns: None .. note:: Generally, this should be called at the beginning", "to automatically persist plots to a Bokeh server. Can use explicitly provided Session", "how to inlude BokehJS (default: \"inline\") **mode** can be 'inline', 'cdn', 'relative(-dev)' or", "plot object. Returns: plot : the current default plot (or None) ''' return", "Resources(mode=mode, root_dir=root_dir, minified=False), 'autosave' : autosave, 'title' : title, } if os.path.isfile(filename): print(\"Session", "with the data for the current document. Args: session (Sesion, optional) : filename", "optional) : whether hold should be turned on or off (default: True) Returns:", "window showing the current output file. If **new** is 'tab', then opens a", "used if present document (Document, optional) : BokehJS document to push if `document`", "the current document. 
This is a convenience function that acts on the current", "session: session = cursession() if not document: document = curdoc() if session: return", "push on Bokeh server An existing documents with the same name will be", "\"\"\" return _list_attr_splat(xgrid() + ygrid()) def load_object(obj): \"\"\"updates object from the server \"\"\"", "if markertype not in _marker_types: raise ValueError(\"Invalid marker type '%s'. Use markers() to", "document. This is a convenience function that acts on the current document, and", "curdoc() if session: return session.store_document(curdoc()) else: warnings.warn(\"push() called but no session was supplied", "def save(filename=None, resources=None): \"\"\" Updates the file with the data for the current", "return _list_attr_splat(xaxis() + yaxis()) def legend(): \"\"\" Get the current :class:`legend <bokeh.objects.Legend>` object(s)", "from .palettes import brewer from .plotting_helpers import ( get_default_color, get_default_alpha, _handle_1d_data_args, _list_attr_splat )", "of the other visual functions is called, this causes the file to be", "return if notebook and session: import IPython.core.displaypub as displaypub push(session=session) snippet = autoload_server(plot,", "this will save the plot to the given filename. Args: filename (str, optional)", "= _doc_wrap(gf.diamond) diamond_cross = _doc_wrap(gf.diamond_cross) image = _doc_wrap(gf.image) image_rgba = _doc_wrap(gf.image_rgba) image_url =", "current `y` :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns y-grid object or splattable list of", "output_server(...) was never called, nothing pushed\") def _doc_wrap(func): @wraps(func) def wrapper(*args, **kwargs): retval", "if _default_file else None session = cursession() notebook = _default_notebook # Map our", "the new figure. This function accepts all plot style keyword parameters. 
Returns: None", "was never called, nothing pushed\") def _doc_wrap(func): @wraps(func) def wrapper(*args, **kwargs): retval =", "various static files should be computed. .. note:: Generally, this should be called", "circle_x, \"o+\": circle_cross, } def markers(): \"\"\" Prints a list of valid marker", "if name: grid._id = name # Walk the plot_arrangement and remove them from", ": the current default session object (or None) ''' return _default_session def hold(value=True):", "document. Args: session (Sesion, optional) : filename to save document under (default: None)", "_doc_wrap(gf.arc) asterisk = _doc_wrap(gf.asterisk) bezier = _doc_wrap(gf.bezier) circle = _doc_wrap(gf.circle) circle_cross = _doc_wrap(gf.circle_cross)", "the current session, if there is one. Returns: session : the current default", "and y items. Args: *args : The data to plot. Can be of", "\"circle\") # TODO: How to handle this? Just call curplot()? if not len(_color_fields.intersection(set(kwargs.keys()))):", "up twice subplots = itertools.chain.from_iterable(plot_arrangement) curdoc().get_context().children = list(set(curdoc().get_context().children) - set(subplots)) curdoc().add(grid) curdoc()._current_plot =", "None \"\"\" print(list(sorted(_marker_types.keys()))) _color_fields = set([\"color\", \"fill_color\", \"line_color\"]) _alpha_fields = set([\"alpha\", \"fill_alpha\", \"line_alpha\"])", "overwritten.\" % filename) def show(browser=None, new=\"tab\", url=None): \"\"\" 'shows' the current plot, by", "in p.renderers if isinstance(obj, Axis) and obj.dimension==1] return _list_attr_splat(axis) def axis(): \"\"\" Get", "will be overwritten.\" % filename) def show(browser=None, new=\"tab\", url=None): \"\"\" 'shows' the current", "was supplied and output_file(...) 
was never called, nothing saved\") return if not curplot():", "return curdoc().curplot() def cursession(): ''' Return the current session, if there is one.", "= _doc_wrap(gf.patch) patches = _doc_wrap(gf.patches) quad = _doc_wrap(gf.quad) quadratic = _doc_wrap(gf.quadratic) ray =", "Returns legend object or splattable list of legend objects on the current plot", "optional) : filename to save document under (default: None) if `filename` is None,", "filename, 'resources' : Resources(mode=mode, root_dir=root_dir, minified=False), 'autosave' : autosave, 'title' : title, }", "of legend objects on the current plot \"\"\" p = curplot() if p", "= get_default_alpha() if markertype not in _marker_types: raise ValueError(\"Invalid marker type '%s'. Use", "in _marker_types: raise ValueError(\"Invalid marker type '%s'. Use markers() to see a list", "datasource=ds) kwargs[\"source\"] = datasource markertype = kwargs.get(\"marker\", \"circle\") # TODO: How to handle", "import _glyph_functions as gf from .document import Document from .embed import notebook_div, file_html,", "circle = _doc_wrap(gf.circle) circle_cross = _doc_wrap(gf.circle_cross) circle_x = _doc_wrap(gf.circle_x) cross = _doc_wrap(gf.cross) diamond", "session: if not _default_session: _default_session = Session(name=name, root_url=url) session = _default_session session.use_doc(docname) session.load_document(curdoc())", "plot \"\"\" p = curplot() if p is None: return None axis =", "not session: if not _default_session: _default_session = Session(name=name, root_url=url) session = _default_session session.use_doc(docname)", "every time plot() or one of the other visual functions is called, this", "a script. .. note:: Calling this function will replaces any existing default Server", "but no session was supplied and output_server(...) was never called, nothing pushed\") def", "server URL as the name Additional keyword arguments like **username**, **userapikey**, and **base_url**", "file to be saved. 
If it is False, then the file is only", "= {'tab': 2, 'window': 1}[new] controller = browserlib.get_browser_controller(browser=browser) plot = curplot() if not", ": An explicit session to use (default: None) If session is None, use", "= autoload_server(plot, cursession()) displaypub.publish_display_data('bokeh', {'text/html': snippet}) elif notebook: import IPython.core.displaypub as displaypub displaypub.publish_display_data('bokeh',", "file_html, autoload_server from .objects import Axis, ColumnDataSource, Glyph, Grid, GridPlot, Legend from .palettes", "a static HTML file. .. note:: This file will be overwritten each time", "or splattable list of x-axis objects on the current plot \"\"\" return _list_attr_splat(xaxis()", "convenience function that acts on the current document, and is equivalent to curdoc().hold(...)", "from within the server, within a request context. (Applets do this for example)", "functions is called, this causes the file to be saved. If it is", "other visual functions is called, this causes the file to be saved. If", "None: docname = \"IPython Session at %s\" % time.ctime() output_server(docname, url=url, session=session, name=name)", "None: return None grid = [obj for obj in p.renderers if isinstance(obj, Grid)", "nothing saved\") return if not curplot(): warnings.warn(\"No current plot to save. Use renderer", "def gridplot(plot_arrangement, name=None): \"\"\" Generate a plot that arranges several subplots into a", "but you don't want to use the global module level document \"\"\" from", "# TODO: How to handle this? Just call curplot()? 
if not len(_color_fields.intersection(set(kwargs.keys()))): kwargs['color']", "Args: browser (str, optional) : browser to show with (default: None) For systems", "markers(): \"\"\" Prints a list of valid marker types for scatter() Returns: None", "**kwargs): retval = func(curdoc(), *args, **kwargs) if cursession() and curdoc()._autostore: push() if _default_file", "of valid marker types for scatter() Returns: None \"\"\" print(list(sorted(_marker_types.keys()))) _color_fields = set([\"color\",", "to save. Use renderer functions (circle, rect, etc.) to create a current plot", ": title, } if os.path.isfile(filename): print(\"Session output file '%s' already exists, will be", "time show() or save() is invoked. Args: autosave (bool, optional) : whether to", "automatically save (default: True) If **autosave** is True, then every time plot() or", "filename) def show(browser=None, new=\"tab\", url=None): \"\"\" 'shows' the current plot, by auto-raising the", "= list(set(curdoc().get_context().children) - set(subplots)) curdoc().add(grid) curdoc()._current_plot = grid # TODO (bev) don't use", "called but no filename was supplied and output_file(...) was never called, nothing saved\")", "so they don't show up twice subplots = itertools.chain.from_iterable(plot_arrangement) curdoc().get_context().children = list(set(curdoc().get_context().children) -", "_default_file['filename'] if resources is None and _default_file: resources = _default_file['resources'] if not filename:", "This function accepts all plot style keyword parameters. Returns: None ''' curdoc().figure(**kwargs) def", "_list_attr_splat(grid) def grid(): \"\"\" Get the current :class:`grid <bokeh.objects.Grid>` object(s) Returns: Returns grid", "patches = _doc_wrap(gf.patches) quad = _doc_wrap(gf.quad) quadratic = _doc_wrap(gf.quadratic) ray = _doc_wrap(gf.ray) rect", "browser window showing the current output file. 
If **new** is 'tab', then opens", "from .objects import Axis, ColumnDataSource, Glyph, Grid, GridPlot, Legend from .palettes import brewer", "def legend(): \"\"\" Get the current :class:`legend <bokeh.objects.Legend>` object(s) Returns: Returns legend object", "new=\"tab\", url=None): \"\"\" 'shows' the current plot, by auto-raising the window or tab", "call the plotting API from within the server, within a request context. (Applets", "None: name = url if not session: if not _default_session: _default_session = Session(name=name," ]
[ "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,", "T \\) has positive support ie. \\( T \\in [0, \\infty) \\). *", "= datasets.load_dataset('SUPPORT') ``` #### `auton_survival.preprocessing` This module provides a flexible API to perform", "datasets features, outcomes = datasets.load_topcat() from auton_survival.preprocessing import Preprocessing features = Preprocessor().fit_transform(features, cat_feats=['GENDER',", "response to a specific intervention. Relies on the specially designed `auton_survival.models.cmhe.DeepCoxMixturesHeterogenousEffects` latent variable", "patients that demonstrate similar outcomes. In the context of this package, we refer", "a clustering algorithm on this representation. ```python from auton_survival.phenotyping import ClusteringPhenotyper # Dimensionality", "this software and associated documentation files (the \"Software\"), to deal in the Software", "OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "demonstrate similar outcomes. 
In the context of this package, we refer to this", "Parametric Time-to-Event Regression with Time-Varying Covariates}, author={<NAME> <NAME> <NAME>}, booktitle={Proceedings of AAAI Spring", "event of interest, \\( T \\) would take places given some features or", "covariates \\( x \\) followed by the use of a clustering algorithm on", "MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) &nbsp;&nbsp;&nbsp; [![GitHub Repo stars](https://img.shields.io/github/stars/autonlab/auton-survival?style=social)](https://github.com/autonlab/auton-survival) <img align=right style=\"align:right;\" src=\"https://ndownloader.figshare.com/files/34052981\" width=30%> <br> The `auton-survival`", "Involves first performing dimensionality reduction on the inpute covariates \\( x \\) followed", "the Software without restriction, including without limitation the rights to use, copy, modify,", "r''' [![Build Status](https://travis-ci.org/autonlab/DeepSurvivalMachines.svg?branch=master)](https://travis-ci.org/autonlab/DeepSurvivalMachines) &nbsp;&nbsp;&nbsp; [![codecov](https://codecov.io/gh/autonlab/DeepSurvivalMachines/branch/master/graph/badge.svg?token=FU1HB5O92D)](https://codecov.io/gh/autonlab/DeepSurvivalMachines) &nbsp;&nbsp;&nbsp; [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) &nbsp;&nbsp;&nbsp; [![GitHub Repo stars](https://img.shields.io/github/stars/autonlab/auton-survival?style=social)](https://github.com/autonlab/auton-survival) <img", "as regression to estimate the conditional survival distribution, \\( \\mathbb{P}(T>t|X) \\). 
As compared", "the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies", "= Preprocessor().fit_transform(features, cat_feats=['GENDER', 'ETHNICITY', 'SMOKE'], num_feats=['height', 'weight']) # The `cat_feats` and `num_feats` lists", "``` @article{nagpal2021dsm, title={Deep survival machines: Fully parametric survival regression and representation learning for", "without restriction, including without limitation the rights to use, copy, modify, merge, publish,", "in your research: [1] [Deep Survival Machines: Fully Parametric Survival Regression and Representation", "Car<NAME> University, [Auton Lab](http://autonlab.org) Permission is hereby granted, free of charge, to any", "Mixture Model (GMM) with 3 components and diagonal covariance. clustering_method, n_clusters = 'gmm',", "[Counterfactual Phenotyping with Censored Time-to-Events (2022)](https://arxiv.org/abs/2202.11089)</a> ``` @article{nagpal2022counterfactual, title={Counterfactual Phenotyping with Censored Time-to-Events},", "various time-to-event data like the popular `SUPPORT`, `FRAMINGHAM` and `PBC` dataset for survival", "sublicense, and/or sell copies of the Software, and to permit persons to whom", "phenotyping and propensity adjusted evaluation. **For complete details on** `auton-survival` **see**: <h3>• <a", "notice and this permission notice shall be included in all copies or substantial", "\\) would take places given some features or covariates \\( X \\). In", "treatment effects. That is, the learnt phenogroups have differential response to a specific", "flexible APIs allowing rapid experimentation including dataset preprocessing, regression, counterfactual estimation, clustering and", "Bug reports and pull requests are welcome. 
[on GitHub]: https://github.com/autonlab/auton-survival License ------- MIT", "``` @article{nagpal2022autonsurvival, url = {https://arxiv.org/abs/2204.07276}, author = {<NAME> <NAME> <NAME>}, title = {auton-survival:", "a flexible APIs allowing rapid experimentation including dataset preprocessing, regression, counterfactual estimation, clustering", "or covariates \\( X \\). In statistics and ML these scenarious are modelled", "Regression. Conference on Machine Learning for Healthcare (2021)](https://arxiv.org/abs/2101.06536)</a> ``` @inproceedings{nagpal2021dcm, title={Deep Cox mixtures", "- **Factual Phenotyping**: Involves the use of structured latent variable models, `auton_survival.models.dcm.DeepCoxMixtures` or", "dim_red_method=dim_red_method, n_components=n_components, n_clusters=n_clusters) # Fit and infer the phenogroups. phenotypes = phenotyper.fit_phenotype(features) #", "tasks. Citing and References ---------------------- Please cite the following if you use `auton-survival`:", "`FRAMINGHAM` and `PBC` dataset for survival analysis. #### `auton_survival.datasets` ```python # Load the", "Involves learning phenotypes that demonstrate heterogenous treatment effects. That is, the learnt phenogroups", "with Time-Varying Covariates. AAAI Spring Symposium (2021)](http://proceedings.mlr.press/v146/nagpal21a.html)</a> ``` @InProceedings{pmlr-v146-nagpal21a, title={Deep Parametric Time-to-Event Regression", "risk at specific time horizons. predictions = model.predict_risk(features, t=[8, 12, 16]) ``` ####", "`PBC` dataset for survival analysis. #### `auton_survival.datasets` ```python # Load the SUPPORT Dataset", "latent clusters or subgroups of patients that demonstrate similar outcomes. In the context", "Hyperparameter grid to perform Cross Validation hyperparam_grid = {'n_estimators' : [50, 100], 'max_depth'", "import SurvivalRegressionCV # Define the Hyperparameter grid to perform Cross Validation hyperparam_grid =", "algorithm on this representation. 
```python from auton_survival.phenotyping import ClusteringPhenotyper # Dimensionality reduction using", "\\mathbb{P}(T>t|X) \\). As compared to typical regression problems, Survival Analysis differs in two", "clustering algorithm on this representation. ```python from auton_survival.phenotyping import ClusteringPhenotyper # Dimensionality reduction", "Data (2022)](https://arxiv.org/abs/2204.07276)</a> ``` @article{nagpal2022autonsurvival, url = {https://arxiv.org/abs/2204.07276}, author = {<NAME> <NAME> <NAME>}, title", "16]) ``` #### `auton_survival.estimators` This module provides a wrapper `auton_survival.estimators.SurvivalModel` to model survival", "to deal in the Software without restriction, including without limitation the rights to", "clustering and phenotyping and propensity adjusted evaluation. **For complete details on** `auton-survival` **see**:", "3 distinct classes, `Scaler`, `Imputer` and `Preprocessor`. The `Preprocessor` class is a composite", "parametric survival regression and representation learning for censored data with competing risks}, author={<NAME>", "``` Phenotyping and Knowledge Discovery ----------------------------------- #### `auton_survival.phenotyping` `auton_survival.phenotyping` allows extraction of latent", "ways: * The Event distribution, \\( T \\) has positive support ie. \\(", "using the SurvivalRegressionCV class model = SurvivalRegressionCV(model='rsf', cv_folds=5, hyperparam_grid=hyperparam_grid) model.fit(features, outcomes) ``` Phenotyping", "Software without restriction, including without limitation the rights to use, copy, modify, merge,", "Scale) the features features = preprocessing.Preprocessor().fit_transform(features) # Train a Deep Cox Proportional Hazards", "conditional survival distribution, \\( \\mathbb{P}(T>t|X) \\). As compared to typical regression problems, Survival", "extraction of latent clusters or subgroups of patients that demonstrate similar outcomes. In", "Data. 
`auton-survival` provides a flexible APIs allowing rapid experimentation including dataset preprocessing, regression,", "top-level interface to run `auton-survival` style experiments of survival analysis, involving cross-validation style", "your research: [1] [Deep Survival Machines: Fully Parametric Survival Regression and Representation Learning", "= models.cph.DeepCoxPH(layers=[100]) model.fit(features, outcomes.time, outcomes.event) # Predict risk at specific time horizons. predictions", "\\( X \\). In statistics and ML these scenarious are modelled as regression", "for projects involving censored Time-to-Event Data. `auton-survival` provides a flexible APIs allowing rapid", "a Gaussian Mixture Model (GMM) with 3 components and diagonal covariance. clustering_method, n_clusters", "survival estimate. auton_survival.reporting.plot_kaplanmeier(outcomes, phenotypes) ``` - **Factual Phenotyping**: Involves the use of structured", "\\( x \\) followed by the use of a clustering algorithm on this", "Model with `auton-survival` ```python from auton_survival import datasets, preprocessing, models # Load the", "= {https://arxiv.org/abs/2204.07276}, author = {<NAME> <NAME> <NAME>}, title = {auton-survival: an Open-Source Package", "places given some features or covariates \\( X \\). In statistics and ML", "requirements.txt ``` Compatibility ------------- `auton-survival` requires `python` 3.5+ and `pytorch` 1.1+. To evaluate", "package, we refer to this task as **phenotyping**. 
`auton_survival.phenotyping` allows: - **Unsupervised Phenotyping**:", "if you employ them in your research: [1] [Deep Survival Machines: Fully Parametric", "<reponame>PotosnakW/auton-survival r''' [![Build Status](https://travis-ci.org/autonlab/DeepSurvivalMachines.svg?branch=master)](https://travis-ci.org/autonlab/DeepSurvivalMachines) &nbsp;&nbsp;&nbsp; [![codecov](https://codecov.io/gh/autonlab/DeepSurvivalMachines/branch/master/graph/badge.svg?token=FU1HB5O92D)](https://codecov.io/gh/autonlab/DeepSurvivalMachines) &nbsp;&nbsp;&nbsp; [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) &nbsp;&nbsp;&nbsp; [![GitHub Repo stars](https://img.shields.io/github/stars/autonlab/auton-survival?style=social)](https://github.com/autonlab/auton-survival)", "• <a href=\"https://nbviewer.org/github/autonlab/auton-survival/tree/master/examples/\">Demo Notebooks</a></h3> What is Survival Analysis? -------------------------- **Survival Analysis** involves estimating", "the popular `SUPPORT`, `FRAMINGHAM` and `PBC` dataset for survival analysis. #### `auton_survival.datasets` ```python", "around other popular python survival analysis packages to experiment with Random Survival Forests", "free of charge, to any person obtaining a copy of this software and", "Health Informatics (2021)](https://arxiv.org/abs/2003.01176)</a> ``` @article{nagpal2021dsm, title={Deep survival machines: Fully parametric survival regression and", "and this permission notice shall be included in all copies or substantial portions", "year={2021}, organization={PMLR} } ``` [4] [Counterfactual Phenotyping with Censored Time-to-Events (2022)](https://arxiv.org/abs/2202.11089)</a> ``` @article{nagpal2022counterfactual,", "the conditional survival distribution, \\( \\mathbb{P}(T>t|X) \\). 
As compared to typical regression problems,", "[4] [Counterfactual Phenotyping with Censored Time-to-Events (2022)](https://arxiv.org/abs/2202.11089)</a> ``` @article{nagpal2022counterfactual, title={Counterfactual Phenotyping with Censored", "Open-Source Package for Regression, Counterfactual Estimation, Evaluation and Phenotyping with Censored Time-to-Event Data", "Train a Deep Survival Machines model using the SurvivalModel class. model = estimators.SurvivalModel(model='dsm')", "rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of", "to this task as **phenotyping**. `auton_survival.phenotyping` allows: - **Unsupervised Phenotyping**: Involves first performing", "survival analysis packages to experiment with Random Survival Forests and Weibull Accelerated Failure", "EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "are lost to follow up. Survival Regression ------------------- #### `auton_survival.models` Training a Deep", "Negar and <NAME>}, booktitle={Machine Learning for Healthcare Conference}, pages={674--708}, year={2021}, organization={PMLR} } ```", "year={2022} } ``` ## Installation ```console foo@bar:~$ git clone https://github.com/autonlab/auton_survival foo@bar:~$ pip install", "# Train a Deep Cox Proportional Hazards (DCPH) model model = models.cph.DeepCoxPH(layers=[100]) model.fit(features,", "Package --------------------------- The python package `auton-survival` is repository of reusable utilities for projects", "url = {https://arxiv.org/abs/2204.07276}, author = {<NAME> <NAME> <NAME>}, title = {auton-survival: an Open-Source", "License Copyright (c) 2022 Car<NAME> University, [Auton Lab](http://autonlab.org) Permission is hereby granted, free", "Spring Symposium on Survival Prediction - Algorithms, Challenges, and Applications 2021}, series={Proceedings of", "and Representation Learning for Censored Data with Competing Risks.\" IEEE Journal of Biomedical", "notice shall be 
included in all copies or substantial portions of the Software.", "href=\"https://www.cs.cmu.edu/~chiragn/papers/auton_survival.pdf\">White Paper</a> &nbsp;&nbsp; • <a href=\"https://autonlab.github.io/auton-survival/\">Documentation</a> &nbsp;&nbsp; • <a href=\"https://nbviewer.org/github/autonlab/auton-survival/tree/master/examples/\">Demo Notebooks</a></h3> What is", "```python # Preprocessing loaded Datasets from auton_survival import datasets features, outcomes = datasets.load_topcat()", "like the popular `SUPPORT`, `FRAMINGHAM` and `PBC` dataset for survival analysis. #### `auton_survival.datasets`", "<img align=right style=\"align:right;\" src=\"https://ndownloader.figshare.com/files/34052981\" width=30%> <br> The `auton-survival` Package --------------------------- The python package", "major ways: * The Event distribution, \\( T \\) has positive support ie.", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION", "and `pytorch` 1.1+. To evaluate performance using standard metrics `auton-survival` requires `scikit-survival`. Contributing", "@article{nagpal2021dsm, title={Deep survival machines: Fully parametric survival regression and representation learning for censored", "Learning for Censored Data with Competing Risks.\" IEEE Journal of Biomedical and Health", "# Load the SUPPORT Dataset outcomes, features = datasets.load_dataset(\"SUPPORT\") # Preprocess (Impute and", "heterogenous treatment effects. That is, the learnt phenogroups have differential response to a", "regression problems, Survival Analysis differs in two major ways: * The Event distribution,", "of reusable utilities for projects involving censored Time-to-Event Data. `auton-survival` provides a flexible", "you use `auton-survival`: [auton-survival: an Open-Source Package for Regression, Counterfactual Estimation, Evaluation and", "and phenotyping and propensity adjusted evaluation. 
**For complete details on** `auton-survival` **see**: <h3>•", "T \\) would take places given some features or covariates \\( X \\).", "when an event of interest, \\( T \\) would take places given some", "OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "outcomes = datasets.load_dataset('SUPPORT') ``` #### `auton_survival.preprocessing` This module provides a flexible API to", "``` [2] [Deep Parametric Time-to-Event Regression with Time-Varying Covariates. AAAI Spring Symposium (2021)](http://proceedings.mlr.press/v146/nagpal21a.html)</a>", "if you use `auton-survival`: [auton-survival: an Open-Source Package for Regression, Counterfactual Estimation, Evaluation", "publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons", "= {auton-survival: an Open-Source Package for Regression, Counterfactual Estimation, Evaluation and Phenotyping with", "Time-to-Event Data (2022)](https://arxiv.org/abs/2204.07276)</a> ``` @article{nagpal2022autonsurvival, url = {https://arxiv.org/abs/2204.07276}, author = {<NAME> <NAME> <NAME>},", "Validation Experiment. from auton_survival.experiments import SurvivalRegressionCV # Define the Hyperparameter grid to perform", "experiments of survival analysis, involving cross-validation style experiments with multiple different survival analysis", "There is presence of censoring ie. a large number of instances of data", "Mixtures for Survival Regression. Conference on Machine Learning for Healthcare (2021)](https://arxiv.org/abs/2101.06536)</a> ``` @inproceedings{nagpal2021dcm,", "competing risks}, author={<NAME> and <NAME> <NAME>}, journal={IEEE Journal of Biomedical and Health Informatics},", "projects involving censored Time-to-Event Data. 
`auton-survival` provides a flexible APIs allowing rapid experimentation", "Status](https://travis-ci.org/autonlab/DeepSurvivalMachines.svg?branch=master)](https://travis-ci.org/autonlab/DeepSurvivalMachines) &nbsp;&nbsp;&nbsp; [![codecov](https://codecov.io/gh/autonlab/DeepSurvivalMachines/branch/master/graph/badge.svg?token=FU1HB5O92D)](https://codecov.io/gh/autonlab/DeepSurvivalMachines) &nbsp;&nbsp;&nbsp; [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) &nbsp;&nbsp;&nbsp; [![GitHub Repo stars](https://img.shields.io/github/stars/autonlab/auton-survival?style=social)](https://github.com/autonlab/auton-survival) <img align=right style=\"align:right;\"", "from auton_survival.preprocessing import Preprocessing features = Preprocessor().fit_transform(features, cat_feats=['GENDER', 'ETHNICITY', 'SMOKE'], num_feats=['height', 'weight']) #", "`auton_survival.phenotyping` allows: - **Unsupervised Phenotyping**: Involves first performing dimensionality reduction on the inpute", "year={2021}, publisher={IEEE} } ``` [2] [Deep Parametric Time-to-Event Regression with Time-Varying Covariates. AAAI", "Proportional Hazards (DCPH) model model = models.cph.DeepCoxPH(layers=[100]) model.fit(features, outcomes.time, outcomes.event) # Predict risk", "``` @InProceedings{pmlr-v146-nagpal21a, title={Deep Parametric Time-to-Event Regression with Time-Varying Covariates}, author={<NAME> <NAME> <NAME>}, booktitle={Proceedings", "rates. - **Counterfactual Phenotyping**: Involves learning phenotypes that demonstrate heterogenous treatment effects. That", "learning models. The module has 3 distinct classes, `Scaler`, `Imputer` and `Preprocessor`. The", "outcomes.time, outcomes.event) # Predict risk at specific time horizons. predictions = model.predict_risk(features, t=[8,", "diagonal covariance. 
clustering_method, n_clusters = 'gmm', 3 # Initialize the phenotyper with the", "an event of interest, \\( T \\) would take places given some features", "Phenotyping**: Involves the use of structured latent variable models, `auton_survival.models.dcm.DeepCoxMixtures` or `auton_survival.models.dsm.DeepSurvivalMachines` to", "References ---------------------- Please cite the following if you use `auton-survival`: [auton-survival: an Open-Source", "above copyright notice and this permission notice shall be included in all copies", "=\"120px\" src=\"https://www.cs.cmu.edu/~chiragn/cmu_logo.jpeg\"> <img align=\"right\" height =\"110px\" src=\"https://www.cs.cmu.edu/~chiragn/auton_logo.png\"> <br><br><br><br><br> ''' __version__ = \"0.1.0\" from", "models. ```python from auton_survival import estimators # Train a Deep Survival Machines model", "<h3>• <a href=\"https://www.cs.cmu.edu/~chiragn/papers/auton_survival.pdf\">White Paper</a> &nbsp;&nbsp; • <a href=\"https://autonlab.github.io/auton-survival/\">Documentation</a> &nbsp;&nbsp; • <a href=\"https://nbviewer.org/github/autonlab/auton-survival/tree/master/examples/\">Demo Notebooks</a></h3>", "model.fit(features, outcomes.time, outcomes.event) # Predict risk at specific time horizons. predictions = model.predict_risk(features,", "**For complete details on** `auton-survival` **see**: <h3>• <a href=\"https://www.cs.cmu.edu/~chiragn/papers/auton_survival.pdf\">White Paper</a> &nbsp;&nbsp; • <a", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS", "@article{nagpal2022autonsurvival, url = {https://arxiv.org/abs/2204.07276}, author = {<NAME> <NAME> <NAME>}, title = {auton-survival: an", "and infer the phenogroups. phenotypes = phenotyper.fit_phenotype(features) # Plot the phenogroup specific Kaplan-Meier", "What is Survival Analysis? 
-------------------------- **Survival Analysis** involves estimating when an event of", "inpute covariates \\( x \\) followed by the use of a clustering algorithm", "`auton-survival` style experiments of survival analysis, involving cross-validation style experiments with multiple different", "support ie. \\( T \\in [0, \\infty) \\). * There is presence of", "dimensions. dim_red_method, = 'pca', 8 # We use a Gaussian Mixture Model (GMM)", "survival distribution, \\( \\mathbb{P}(T>t|X) \\). As compared to typical regression problems, Survival Analysis", "-r requirements.txt ``` Compatibility ------------- `auton-survival` requires `python` 3.5+ and `pytorch` 1.1+. To", "furnished to do so, subject to the following conditions: The above copyright notice", "'max_depth' : [3, 5], 'max_features' : ['sqrt', 'log2']} # Train a RSF model", "``` Compatibility ------------- `auton-survival` requires `python` 3.5+ and `pytorch` 1.1+. To evaluate performance", "permit persons to whom the Software is furnished to do so, subject to", "preprocessing, regression, counterfactual estimation, clustering and phenotyping and propensity adjusted evaluation. **For complete", "Preprocess (Impute and Scale) the features features = preprocessing.Preprocessor().fit_transform(features) # Train a Deep", "copies of the Software, and to permit persons to whom the Software is", "datasets.load_dataset(\"SUPPORT\") # Preprocess (Impute and Scale) the features features = preprocessing.Preprocessor().fit_transform(features) # Train", "\\in [0, \\infty) \\). * There is presence of censoring ie. a large", "__version__ = \"0.1.0\" from .models.dsm import DeepSurvivalMachines from .models.dcm import DeepCoxMixtures from .models.cph", "have differential response to a specific intervention. Relies on the specially designed `auton_survival.models.cmhe.DeepCoxMixturesHeterogenousEffects`", "```python # auton-survival Style Cross Validation Experiment. 
from auton_survival.experiments import SurvivalRegressionCV # Define", "models. The module has 3 distinct classes, `Scaler`, `Imputer` and `Preprocessor`. The `Preprocessor`", "a RSF model with cross-validation using the SurvivalRegressionCV class model = SurvivalRegressionCV(model='rsf', cv_folds=5,", "including dataset preprocessing, regression, counterfactual estimation, clustering and phenotyping and propensity adjusted evaluation.", "use a Gaussian Mixture Model (GMM) with 3 components and diagonal covariance. clustering_method,", "Training a Deep Cox Proportional Hazards Model with `auton-survival` ```python from auton_survival import", "Involves the use of structured latent variable models, `auton_survival.models.dcm.DeepCoxMixtures` or `auton_survival.models.dsm.DeepSurvivalMachines` to recover", "features, outcomes = datasets.load_topcat() from auton_survival.preprocessing import Preprocessing features = Preprocessor().fit_transform(features, cat_feats=['GENDER', 'ETHNICITY',", "1.1+. To evaluate performance using standard metrics `auton-survival` requires `scikit-survival`. Contributing ------------ `auton-survival`", "``` #### `auton_survival.experiments` Modules to perform standard survival analysis experiments. This module provides", "phenotyper = ClusteringPhenotyper(clustering_method=clustering_method, dim_red_method=dim_red_method, n_components=n_components, n_clusters=n_clusters) # Fit and infer the phenogroups. phenotypes", "{https://arxiv.org/abs/2204.07276}, author = {<NAME> <NAME> <NAME>}, title = {auton-survival: an Open-Source Package for", "for downstream machine learning models. The module has 3 distinct classes, `Scaler`, `Imputer`", "organization={PMLR} } ``` [4] [Counterfactual Phenotyping with Censored Time-to-Events (2022)](https://arxiv.org/abs/2202.11089)</a> ``` @article{nagpal2022counterfactual, title={Counterfactual", "survival regression methods. 
`auton_survival.estimators` also provides convenient wrappers around other popular python survival", "of interest, \\( T \\) would take places given some features or covariates", "src=\"https://ndownloader.figshare.com/files/34052981\" width=30%> <br> The `auton-survival` Package --------------------------- The python package `auton-survival` is repository", "cat_feats=['GENDER', 'ETHNICITY', 'SMOKE'], num_feats=['height', 'weight']) # The `cat_feats` and `num_feats` lists would contain", "of this software and associated documentation files (the \"Software\"), to deal in the", "Random Survival Forests and Weibull Accelerated Failure Time regression models. ```python from auton_survival", "Relies on the specially designed `auton_survival.models.cmhe.DeepCoxMixturesHeterogenousEffects` latent variable model. Dataset Loading and Preprocessing", "sell copies of the Software, and to permit persons to whom the Software", "n_clusters = 'gmm', 3 # Initialize the phenotyper with the above hyperparameters. phenotyper", "auton_survival import dataset features, outcomes = datasets.load_dataset('SUPPORT') ``` #### `auton_survival.preprocessing` This module provides", "Time regression models. ```python from auton_survival import estimators # Train a Deep Survival", "`Imputer` and `Preprocessor`. The `Preprocessor` class is a composite transform that does both", "# numerical features in the dataset. ``` Evaluation and Reporting ------------------------- #### `auton_survival.metrics`", "interface to run `auton-survival` style experiments of survival analysis, involving cross-validation style experiments", "functions to load and prerocsss various time-to-event data like the popular `SUPPORT`, `FRAMINGHAM`", "Analysis (PCA) to 8 dimensions. 
dim_red_method, = 'pca', 8 # We use a", "= {2022}, } ``` Additionally, models and methods in `auton_survival` come from the", "Survival Prediction - Algorithms, Challenges, and Applications 2021}, series={Proceedings of Machine Learning Research},", "ie. a large number of instances of data are lost to follow up.", "num_feats=['height', 'weight']) # The `cat_feats` and `num_feats` lists would contain all the categorical", "----------------------------------- #### `auton_survival.phenotyping` `auton_survival.phenotyping` allows extraction of latent clusters or subgroups of patients", "individual papers if you employ them in your research: [1] [Deep Survival Machines:", "Reporting ------------------------- #### `auton_survival.metrics` Helper functions to generate standard reports for common Survival", "phenotypes = phenotyper.fit_phenotype(features) # Plot the phenogroup specific Kaplan-Meier survival estimate. auton_survival.reporting.plot_kaplanmeier(outcomes, phenotypes)", "cross-validation style experiments with multiple different survival analysis models ```python # auton-survival Style", "**phenotyping**. `auton_survival.phenotyping` allows: - **Unsupervised Phenotyping**: Involves first performing dimensionality reduction on the", "similar outcomes. In the context of this package, we refer to this task", "Time-to-Events}, author={<NAME> <NAME> <NAME> <NAME>}, journal={arXiv preprint arXiv:2202.11089}, year={2022} } ``` ## Installation", "Accelerated Failure Time regression models. ```python from auton_survival import estimators # Train a", "Cross Validation hyperparam_grid = {'n_estimators' : [50, 100], 'max_depth' : [3, 5], 'max_features'", "Experiment. 
from auton_survival.experiments import SurvivalRegressionCV # Define the Hyperparameter grid to perform Cross", "src=\"https://www.cs.cmu.edu/~chiragn/auton_logo.png\"> <br><br><br><br><br> ''' __version__ = \"0.1.0\" from .models.dsm import DeepSurvivalMachines from .models.dcm import", "also provides convenient wrappers around other popular python survival analysis packages to experiment", "of the Software, and to permit persons to whom the Software is furnished", "variable model. Dataset Loading and Preprocessing --------------------------------- Helper functions to load and prerocsss", "(Impute and Scale) the features features = preprocessing.Preprocessor().fit_transform(features) # Train a Deep Cox", "experimentation including dataset preprocessing, regression, counterfactual estimation, clustering and phenotyping and propensity adjusted", "Parametric Time-to-Event Regression with Time-Varying Covariates. AAAI Spring Symposium (2021)](http://proceedings.mlr.press/v146/nagpal21a.html)</a> ``` @InProceedings{pmlr-v146-nagpal21a, title={Deep", "Fit and infer the phenogroups. phenotypes = phenotyper.fit_phenotype(features) # Plot the phenogroup specific", "data are lost to follow up. Survival Regression ------------------- #### `auton_survival.models` Training a", "latent variable models, `auton_survival.models.dcm.DeepCoxMixtures` or `auton_survival.models.dsm.DeepSurvivalMachines` to recover phenogroups that demonstrate differential observed", "\\) followed by the use of a clustering algorithm on this representation. ```python", "analysis methods. The use of the wrapper allows a simple standard interface for", "the dataset. 
``` Evaluation and Reporting ------------------------- #### `auton_survival.metrics` Helper functions to generate", "copyright notice and this permission notice shall be included in all copies or", "5], 'max_features' : ['sqrt', 'log2']} # Train a RSF model with cross-validation using", "analysis models ```python # auton-survival Style Cross Validation Experiment. from auton_survival.experiments import SurvivalRegressionCV", "Risks.\" IEEE Journal of Biomedical and Health Informatics (2021)](https://arxiv.org/abs/2003.01176)</a> ``` @article{nagpal2021dsm, title={Deep survival", "Helper functions to load and prerocsss various time-to-event data like the popular `SUPPORT`,", "features in the dataset. ``` Evaluation and Reporting ------------------------- #### `auton_survival.metrics` Helper functions", "Package for Regression, Counterfactual Estimation, Evaluation and Phenotyping with Censored Time-to-Event Data (2022)](https://arxiv.org/abs/2204.07276)</a>", "for censored data with competing risks}, author={<NAME> and <NAME> <NAME>}, journal={IEEE Journal of", "@article{nagpal2022counterfactual, title={Counterfactual Phenotyping with Censored Time-to-Events}, author={<NAME> <NAME> <NAME> <NAME>}, journal={arXiv preprint arXiv:2202.11089},", "<img align=\"right\" height =\"110px\" src=\"https://www.cs.cmu.edu/~chiragn/auton_logo.png\"> <br><br><br><br><br> ''' __version__ = \"0.1.0\" from .models.dsm import", "(time-to-event) analysis methods. The use of the wrapper allows a simple standard interface", "to estimate the conditional survival distribution, \\( \\mathbb{P}(T>t|X) \\). As compared to typical", "Informatics}, volume={25}, number={8}, pages={3163--3175}, year={2021}, publisher={IEEE} } ``` [2] [Deep Parametric Time-to-Event Regression", "`auton-survival` is [on GitHub]. Bug reports and pull requests are welcome. [on GitHub]:", "survival analysis. 
#### `auton_survival.datasets` ```python # Load the SUPPORT Dataset from auton_survival import", "Train a Deep Cox Proportional Hazards (DCPH) model model = models.cph.DeepCoxPH(layers=[100]) model.fit(features, outcomes.time,", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "analysis packages to experiment with Random Survival Forests and Weibull Accelerated Failure Time", "different survival analysis models ```python # auton-survival Style Cross Validation Experiment. from auton_survival.experiments", "100], 'max_depth' : [3, 5], 'max_features' : ['sqrt', 'log2']} # Train a RSF", "= datasets.load_dataset(\"SUPPORT\") # Preprocess (Impute and Scale) the features features = preprocessing.Preprocessor().fit_transform(features) #", "GitHub]. Bug reports and pull requests are welcome. [on GitHub]: https://github.com/autonlab/auton-survival License -------", "- Algorithms, Challenges, and Applications 2021}, series={Proceedings of Machine Learning Research}, publisher={PMLR}, }", "The `cat_feats` and `num_feats` lists would contain all the categorical and # numerical", "Principal Component Analysis (PCA) to 8 dimensions. dim_red_method, = 'pca', 8 # We", "demonstrate heterogenous treatment effects. That is, the learnt phenogroups have differential response to", "model = SurvivalRegressionCV(model='rsf', cv_folds=5, hyperparam_grid=hyperparam_grid) model.fit(features, outcomes) ``` Phenotyping and Knowledge Discovery -----------------------------------", "methods in `auton_survival` come from the following papers. 
Please cite the individual papers", "`auton-survival` Package --------------------------- The python package `auton-survival` is repository of reusable utilities for", "``` ## Installation ```console foo@bar:~$ git clone https://github.com/autonlab/auton_survival foo@bar:~$ pip install -r requirements.txt", "distribute, sublicense, and/or sell copies of the Software, and to permit persons to", "software and associated documentation files (the \"Software\"), to deal in the Software without", "model.predict_risk(features, times=[8, 12, 16]) ``` #### `auton_survival.experiments` Modules to perform standard survival analysis", "<NAME>}, booktitle={Machine Learning for Healthcare Conference}, pages={674--708}, year={2021}, organization={PMLR} } ``` [4] [Counterfactual", "Cox Proportional Hazards Model with `auton-survival` ```python from auton_survival import datasets, preprocessing, models", "models, `auton_survival.models.dcm.DeepCoxMixtures` or `auton_survival.models.dsm.DeepSurvivalMachines` to recover phenogroups that demonstrate differential observed survival rates.", "reports for common Survival Analysis tasks. Citing and References ---------------------- Please cite the", "with standard survival (time-to-event) analysis methods. The use of the wrapper allows a", "(2021)](https://arxiv.org/abs/2003.01176)</a> ``` @article{nagpal2021dsm, title={Deep survival machines: Fully parametric survival regression and representation learning", "mixtures for survival regression}, author={<NAME> Yadlowsky, <NAME> Rostamzadeh, Negar and <NAME>}, booktitle={Machine Learning", "shall be included in all copies or substantial portions of the Software. THE", "Cox Mixtures for Survival Regression. Conference on Machine Learning for Healthcare (2021)](https://arxiv.org/abs/2101.06536)</a> ```", "NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "and `Preprocessor`. 
The `Preprocessor` class is a composite transform that does both Imputing", "Prediction - Algorithms, Challenges, and Applications 2021}, series={Proceedings of Machine Learning Research}, publisher={PMLR},", "specific time horizons. predictions = model.predict_risk(features, t=[8, 12, 16]) ``` #### `auton_survival.estimators` This", "with cross-validation using the SurvivalRegressionCV class model = SurvivalRegressionCV(model='rsf', cv_folds=5, hyperparam_grid=hyperparam_grid) model.fit(features, outcomes)", "estimate. auton_survival.reporting.plot_kaplanmeier(outcomes, phenotypes) ``` - **Factual Phenotyping**: Involves the use of structured latent", "Fully Parametric Survival Regression and Representation Learning for Censored Data with Competing Risks.\"", "Event distribution, \\( T \\) has positive support ie. \\( T \\in [0,", "model.predict_risk(features, t=[8, 12, 16]) ``` #### `auton_survival.estimators` This module provides a wrapper `auton_survival.estimators.SurvivalModel`", "performing dimensionality reduction on the inpute covariates \\( x \\) followed by the", "representation. ```python from auton_survival.phenotyping import ClusteringPhenotyper # Dimensionality reduction using Principal Component Analysis", "Survival Machines model using the SurvivalModel class. model = estimators.SurvivalModel(model='dsm') model.fit(features, outcomes) #", "model with cross-validation using the SurvivalRegressionCV class model = SurvivalRegressionCV(model='rsf', cv_folds=5, hyperparam_grid=hyperparam_grid) model.fit(features,", "this package, we refer to this task as **phenotyping**. `auton_survival.phenotyping` allows: - **Unsupervised", "and <NAME> <NAME>}, journal={IEEE Journal of Biomedical and Health Informatics}, volume={25}, number={8}, pages={3163--3175},", "is hereby granted, free of charge, to any person obtaining a copy of", "multiple different survival regression methods. 
`auton_survival.estimators` also provides convenient wrappers around other popular", "perform imputation and data normalization for downstream machine learning models. The module has", "of data are lost to follow up. Survival Regression ------------------- #### `auton_survival.models` Training", "provides a wrapper `auton_survival.estimators.SurvivalModel` to model survival datasets with standard survival (time-to-event) analysis", "publisher={PMLR}, } ``` [3] [Deep Cox Mixtures for Survival Regression. Conference on Machine", "We use a Gaussian Mixture Model (GMM) with 3 components and diagonal covariance.", "research: [1] [Deep Survival Machines: Fully Parametric Survival Regression and Representation Learning for", "use of a clustering algorithm on this representation. ```python from auton_survival.phenotyping import ClusteringPhenotyper", "features features = preprocessing.Preprocessor().fit_transform(features) # Train a Deep Cox Proportional Hazards (DCPH) model", "`auton_survival.models` Training a Deep Cox Proportional Hazards Model with `auton-survival` ```python from auton_survival", "[0, \\infty) \\). * There is presence of censoring ie. a large number", "the Hyperparameter grid to perform Cross Validation hyperparam_grid = {'n_estimators' : [50, 100],", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "datasets.load_topcat() from auton_survival.preprocessing import Preprocessing features = Preprocessor().fit_transform(features, cat_feats=['GENDER', 'ETHNICITY', 'SMOKE'], num_feats=['height', 'weight'])", "observed survival rates. - **Counterfactual Phenotyping**: Involves learning phenotypes that demonstrate heterogenous treatment", "Analysis? 
-------------------------- **Survival Analysis** involves estimating when an event of interest, \\( T", "Preprocessing loaded Datasets from auton_survival import datasets features, outcomes = datasets.load_topcat() from auton_survival.preprocessing", "learning phenotypes that demonstrate heterogenous treatment effects. That is, the learnt phenogroups have", "style experiments of survival analysis, involving cross-validation style experiments with multiple different survival", "model.fit(features, outcomes) # Predict risk at time horizons. predictions = model.predict_risk(features, times=[8, 12,", "Covariates}, author={<NAME> <NAME> <NAME>}, booktitle={Proceedings of AAAI Spring Symposium on Survival Prediction -", "them in your research: [1] [Deep Survival Machines: Fully Parametric Survival Regression and", "Health Informatics}, volume={25}, number={8}, pages={3163--3175}, year={2021}, publisher={IEEE} } ``` [2] [Deep Parametric Time-to-Event", "AAAI Spring Symposium (2021)](http://proceedings.mlr.press/v146/nagpal21a.html)</a> ``` @InProceedings{pmlr-v146-nagpal21a, title={Deep Parametric Time-to-Event Regression with Time-Varying Covariates},", "this task as **phenotyping**. `auton_survival.phenotyping` allows: - **Unsupervised Phenotyping**: Involves first performing dimensionality", "instances of data are lost to follow up. Survival Regression ------------------- #### `auton_survival.models`", "`auton_survival.phenotyping` allows extraction of latent clusters or subgroups of patients that demonstrate similar", "performance using standard metrics `auton-survival` requires `scikit-survival`. Contributing ------------ `auton-survival` is [on GitHub].", "'ETHNICITY', 'SMOKE'], num_feats=['height', 'weight']) # The `cat_feats` and `num_feats` lists would contain all", "<NAME> Rostamzadeh, Negar and <NAME>}, booktitle={Machine Learning for Healthcare Conference}, pages={674--708}, year={2021}, organization={PMLR}", "USE OR OTHER DEALINGS IN THE SOFTWARE. 
<img align=\"right\" height =\"120px\" src=\"https://www.cs.cmu.edu/~chiragn/cmu_logo.jpeg\"> <img", "experiments with multiple different survival analysis models ```python # auton-survival Style Cross Validation", "`auton_survival.metrics` Helper functions to generate standard reports for common Survival Analysis tasks. Citing", "[![Build Status](https://travis-ci.org/autonlab/DeepSurvivalMachines.svg?branch=master)](https://travis-ci.org/autonlab/DeepSurvivalMachines) &nbsp;&nbsp;&nbsp; [![codecov](https://codecov.io/gh/autonlab/DeepSurvivalMachines/branch/master/graph/badge.svg?token=FU1HB5O92D)](https://codecov.io/gh/autonlab/DeepSurvivalMachines) &nbsp;&nbsp;&nbsp; [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) &nbsp;&nbsp;&nbsp; [![GitHub Repo stars](https://img.shields.io/github/stars/autonlab/auton-survival?style=social)](https://github.com/autonlab/auton-survival) <img align=right", "`auton_survival` come from the following papers. Please cite the individual papers if you", "the phenogroup specific Kaplan-Meier survival estimate. auton_survival.reporting.plot_kaplanmeier(outcomes, phenotypes) ``` - **Factual Phenotyping**: Involves", "using the SurvivalModel class. model = estimators.SurvivalModel(model='dsm') model.fit(features, outcomes) # Predict risk at", "different survival regression methods. `auton_survival.estimators` also provides convenient wrappers around other popular python", "\\) has positive support ie. \\( T \\in [0, \\infty) \\). * There", "= estimators.SurvivalModel(model='dsm') model.fit(features, outcomes) # Predict risk at time horizons. predictions = model.predict_risk(features,", "merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit", "Time-to-Event Regression with Time-Varying Covariates. 
AAAI Spring Symposium (2021)](http://proceedings.mlr.press/v146/nagpal21a.html)</a> ``` @InProceedings{pmlr-v146-nagpal21a, title={Deep Parametric", "come from the following papers. Please cite the individual papers if you employ", "module provides a top-level interface to run `auton-survival` style experiments of survival analysis,", "Cox mixtures for survival regression}, author={<NAME> Yadlowsky, <NAME> Rostamzadeh, Negar and <NAME>}, booktitle={Machine", "= model.predict_risk(features, t=[8, 12, 16]) ``` #### `auton_survival.estimators` This module provides a wrapper", "'pca', 8 # We use a Gaussian Mixture Model (GMM) with 3 components", "phenotyper.fit_phenotype(features) # Plot the phenogroup specific Kaplan-Meier survival estimate. auton_survival.reporting.plot_kaplanmeier(outcomes, phenotypes) ``` -", "[1] [Deep Survival Machines: Fully Parametric Survival Regression and Representation Learning for Censored", "is repository of reusable utilities for projects involving censored Time-to-Event Data. `auton-survival` provides", "--------------------------------- Helper functions to load and prerocsss various time-to-event data like the popular", "ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "intervention. Relies on the specially designed `auton_survival.models.cmhe.DeepCoxMixturesHeterogenousEffects` latent variable model. Dataset Loading and", "-------------------------- **Survival Analysis** involves estimating when an event of interest, \\( T \\)", "with Time-Varying Covariates}, author={<NAME> <NAME> <NAME>}, booktitle={Proceedings of AAAI Spring Symposium on Survival", "<NAME> <NAME>}, journal={IEEE Journal of Biomedical and Health Informatics}, volume={25}, number={8}, pages={3163--3175}, year={2021},", "WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF", "3.5+ and `pytorch` 1.1+. 
To evaluate performance using standard metrics `auton-survival` requires `scikit-survival`.", "phenotyper with the above hyperparameters. phenotyper = ClusteringPhenotyper(clustering_method=clustering_method, dim_red_method=dim_red_method, n_components=n_components, n_clusters=n_clusters) # Fit", "standard metrics `auton-survival` requires `scikit-survival`. Contributing ------------ `auton-survival` is [on GitHub]. Bug reports", "TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE", "The `auton-survival` Package --------------------------- The python package `auton-survival` is repository of reusable utilities", "package `auton-survival` is repository of reusable utilities for projects involving censored Time-to-Event Data.", "Covariates. AAAI Spring Symposium (2021)](http://proceedings.mlr.press/v146/nagpal21a.html)</a> ``` @InProceedings{pmlr-v146-nagpal21a, title={Deep Parametric Time-to-Event Regression with Time-Varying", "<NAME> <NAME>}, booktitle={Proceedings of AAAI Spring Symposium on Survival Prediction - Algorithms, Challenges,", "import datasets features, outcomes = datasets.load_topcat() from auton_survival.preprocessing import Preprocessing features = Preprocessor().fit_transform(features,", "both Imputing ***and*** Scaling with a single function call. ```python # Preprocessing loaded", "charge, to any person obtaining a copy of this software and associated documentation", "functions to generate standard reports for common Survival Analysis tasks. Citing and References", "normalization for downstream machine learning models. The module has 3 distinct classes, `Scaler`,", "pull requests are welcome. [on GitHub]: https://github.com/autonlab/auton-survival License ------- MIT License Copyright (c)", "KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "number of instances of data are lost to follow up. 
Survival Regression -------------------", "Learning Research}, publisher={PMLR}, } ``` [3] [Deep Cox Mixtures for Survival Regression. Conference", "estimators # Train a Deep Survival Machines model using the SurvivalModel class. model", "`auton_survival.estimators` also provides convenient wrappers around other popular python survival analysis packages to", "analysis. #### `auton_survival.datasets` ```python # Load the SUPPORT Dataset from auton_survival import dataset", "persons to whom the Software is furnished to do so, subject to the", "IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "= 'pca', 8 # We use a Gaussian Mixture Model (GMM) with 3", "that demonstrate similar outcomes. In the context of this package, we refer to", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND", "the above hyperparameters. phenotyper = ClusteringPhenotyper(clustering_method=clustering_method, dim_red_method=dim_red_method, n_components=n_components, n_clusters=n_clusters) # Fit and infer", "\\( T \\) has positive support ie. \\( T \\in [0, \\infty) \\).", "Healthcare (2021)](https://arxiv.org/abs/2101.06536)</a> ``` @inproceedings{nagpal2021dcm, title={Deep Cox mixtures for survival regression}, author={<NAME> Yadlowsky, <NAME>", "n_components=n_components, n_clusters=n_clusters) # Fit and infer the phenogroups. phenotypes = phenotyper.fit_phenotype(features) # Plot", "Counterfactual Estimation, Evaluation and Phenotyping with Censored Time-to-Event Data}, publisher = {arXiv}, year", "= {arXiv}, year = {2022}, } ``` Additionally, models and methods in `auton_survival`", "3 # Initialize the phenotyper with the above hyperparameters. 
phenotyper = ClusteringPhenotyper(clustering_method=clustering_method, dim_red_method=dim_red_method,", "<NAME> <NAME>}, journal={arXiv preprint arXiv:2202.11089}, year={2022} } ``` ## Installation ```console foo@bar:~$ git", "to whom the Software is furnished to do so, subject to the following", "foo@bar:~$ git clone https://github.com/autonlab/auton_survival foo@bar:~$ pip install -r requirements.txt ``` Compatibility ------------- `auton-survival`", "Censored Time-to-Event Data (2022)](https://arxiv.org/abs/2204.07276)</a> ``` @article{nagpal2022autonsurvival, url = {https://arxiv.org/abs/2204.07276}, author = {<NAME> <NAME>", "and Phenotyping with Censored Time-to-Event Data (2022)](https://arxiv.org/abs/2204.07276)</a> ``` @article{nagpal2022autonsurvival, url = {https://arxiv.org/abs/2204.07276}, author", "standard survival analysis experiments. This module provides a top-level interface to run `auton-survival`", "from .models.dcm import DeepCoxMixtures from .models.cph import DeepCoxPH, DeepRecurrentCoxPH from .models.cmhe import DeepCoxMixturesHeterogenousEffects", "ClusteringPhenotyper # Dimensionality reduction using Principal Component Analysis (PCA) to 8 dimensions. dim_red_method,", "use of structured latent variable models, `auton_survival.models.dcm.DeepCoxMixtures` or `auton_survival.models.dsm.DeepSurvivalMachines` to recover phenogroups that", "in the Software without restriction, including without limitation the rights to use, copy,", "first performing dimensionality reduction on the inpute covariates \\( x \\) followed by", "time-to-event data like the popular `SUPPORT`, `FRAMINGHAM` and `PBC` dataset for survival analysis.", "distinct classes, `Scaler`, `Imputer` and `Preprocessor`. The `Preprocessor` class is a composite transform", "module provides a flexible API to perform imputation and data normalization for downstream", "interface for multiple different survival regression methods. 
`auton_survival.estimators` also provides convenient wrappers around", "ML these scenarious are modelled as regression to estimate the conditional survival distribution,", "from .models.dsm import DeepSurvivalMachines from .models.dcm import DeepCoxMixtures from .models.cph import DeepCoxPH, DeepRecurrentCoxPH", "distribution, \\( T \\) has positive support ie. \\( T \\in [0, \\infty)", "predictions = model.predict_risk(features, times=[8, 12, 16]) ``` #### `auton_survival.experiments` Modules to perform standard", "SOFTWARE. <img align=\"right\" height =\"120px\" src=\"https://www.cs.cmu.edu/~chiragn/cmu_logo.jpeg\"> <img align=\"right\" height =\"110px\" src=\"https://www.cs.cmu.edu/~chiragn/auton_logo.png\"> <br><br><br><br><br> '''", "Please cite the individual papers if you employ them in your research: [1]", "OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. <img align=\"right\" height =\"120px\"", "and prerocsss various time-to-event data like the popular `SUPPORT`, `FRAMINGHAM` and `PBC` dataset", "Healthcare Conference}, pages={674--708}, year={2021}, organization={PMLR} } ``` [4] [Counterfactual Phenotyping with Censored Time-to-Events", "loaded Datasets from auton_survival import datasets features, outcomes = datasets.load_topcat() from auton_survival.preprocessing import", "and Reporting ------------------------- #### `auton_survival.metrics` Helper functions to generate standard reports for common", "a composite transform that does both Imputing ***and*** Scaling with a single function", "Imputing ***and*** Scaling with a single function call. ```python # Preprocessing loaded Datasets", "refer to this task as **phenotyping**. `auton_survival.phenotyping` allows: - **Unsupervised Phenotyping**: Involves first", "Censored Time-to-Events (2022)](https://arxiv.org/abs/2202.11089)</a> ``` @article{nagpal2022counterfactual, title={Counterfactual Phenotyping with Censored Time-to-Events}, author={<NAME> <NAME> <NAME>", "with 3 components and diagonal covariance. 
clustering_method, n_clusters = 'gmm', 3 # Initialize", "------------ `auton-survival` is [on GitHub]. Bug reports and pull requests are welcome. [on", "these scenarious are modelled as regression to estimate the conditional survival distribution, \\(", "`auton-survival` **see**: <h3>• <a href=\"https://www.cs.cmu.edu/~chiragn/papers/auton_survival.pdf\">White Paper</a> &nbsp;&nbsp; • <a href=\"https://autonlab.github.io/auton-survival/\">Documentation</a> &nbsp;&nbsp; • <a", "all the categorical and # numerical features in the dataset. ``` Evaluation and", "`cat_feats` and `num_feats` lists would contain all the categorical and # numerical features", "# Initialize the phenotyper with the above hyperparameters. phenotyper = ClusteringPhenotyper(clustering_method=clustering_method, dim_red_method=dim_red_method, n_components=n_components,", "of censoring ie. a large number of instances of data are lost to", "Dimensionality reduction using Principal Component Analysis (PCA) to 8 dimensions. 
dim_red_method, = 'pca',", "author={<NAME> Yadlowsky, <NAME> Rostamzadeh, Negar and <NAME>}, booktitle={Machine Learning for Healthcare Conference}, pages={674--708},", "the features features = preprocessing.Preprocessor().fit_transform(features) # Train a Deep Cox Proportional Hazards (DCPH)", "on the inpute covariates \\( x \\) followed by the use of a", "with Censored Time-to-Event Data (2022)](https://arxiv.org/abs/2204.07276)</a> ``` @article{nagpal2022autonsurvival, url = {https://arxiv.org/abs/2204.07276}, author = {<NAME>", "associated documentation files (the \"Software\"), to deal in the Software without restriction, including", "Dataset Loading and Preprocessing --------------------------------- Helper functions to load and prerocsss various time-to-event", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE", "cross-validation using the SurvivalRegressionCV class model = SurvivalRegressionCV(model='rsf', cv_folds=5, hyperparam_grid=hyperparam_grid) model.fit(features, outcomes) ```", "This module provides a flexible API to perform imputation and data normalization for", "grid to perform Cross Validation hyperparam_grid = {'n_estimators' : [50, 100], 'max_depth' :", "Package for Regression, Counterfactual Estimation, Evaluation and Phenotyping with Censored Time-to-Event Data}, publisher", "regression to estimate the conditional survival distribution, \\( \\mathbb{P}(T>t|X) \\). As compared to", "use of the wrapper allows a simple standard interface for multiple different survival", "a Deep Cox Proportional Hazards Model with `auton-survival` ```python from auton_survival import datasets,", "auton_survival import datasets features, outcomes = datasets.load_topcat() from auton_survival.preprocessing import Preprocessing features =", "obtaining a copy of this software and associated documentation files (the \"Software\"), to", "estimate the conditional survival distribution, \\( \\mathbb{P}(T>t|X) \\). 
As compared to typical regression", "The module has 3 distinct classes, `Scaler`, `Imputer` and `Preprocessor`. The `Preprocessor` class", "Machine Learning for Healthcare (2021)](https://arxiv.org/abs/2101.06536)</a> ``` @inproceedings{nagpal2021dcm, title={Deep Cox mixtures for survival regression},", "models and methods in `auton_survival` come from the following papers. Please cite the", "the specially designed `auton_survival.models.cmhe.DeepCoxMixturesHeterogenousEffects` latent variable model. Dataset Loading and Preprocessing --------------------------------- Helper", "specially designed `auton_survival.models.cmhe.DeepCoxMixturesHeterogenousEffects` latent variable model. Dataset Loading and Preprocessing --------------------------------- Helper functions", "import datasets, preprocessing, models # Load the SUPPORT Dataset outcomes, features = datasets.load_dataset(\"SUPPORT\")", "designed `auton_survival.models.cmhe.DeepCoxMixturesHeterogenousEffects` latent variable model. Dataset Loading and Preprocessing --------------------------------- Helper functions to", "As compared to typical regression problems, Survival Analysis differs in two major ways:", "Evaluation and Phenotyping with Censored Time-to-Event Data}, publisher = {arXiv}, year = {2022},", "Load the SUPPORT Dataset from auton_survival import dataset features, outcomes = datasets.load_dataset('SUPPORT') ```", "[3, 5], 'max_features' : ['sqrt', 'log2']} # Train a RSF model with cross-validation", "requires `python` 3.5+ and `pytorch` 1.1+. To evaluate performance using standard metrics `auton-survival`", "including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,", "adjusted evaluation. **For complete details on** `auton-survival` **see**: <h3>• <a href=\"https://www.cs.cmu.edu/~chiragn/papers/auton_survival.pdf\">White Paper</a> &nbsp;&nbsp;", "in the dataset. 
``` Evaluation and Reporting ------------------------- #### `auton_survival.metrics` Helper functions to", "or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "contain all the categorical and # numerical features in the dataset. ``` Evaluation", "papers if you employ them in your research: [1] [Deep Survival Machines: Fully", "horizons. predictions = model.predict_risk(features, times=[8, 12, 16]) ``` #### `auton_survival.experiments` Modules to perform", "# Fit and infer the phenogroups. phenotypes = phenotyper.fit_phenotype(features) # Plot the phenogroup", "Time-Varying Covariates}, author={<NAME> <NAME> <NAME>}, booktitle={Proceedings of AAAI Spring Symposium on Survival Prediction", "hyperparam_grid = {'n_estimators' : [50, 100], 'max_depth' : [3, 5], 'max_features' : ['sqrt',", "style experiments with multiple different survival analysis models ```python # auton-survival Style Cross", "[3] [Deep Cox Mixtures for Survival Regression. Conference on Machine Learning for Healthcare", "`auton-survival` requires `scikit-survival`. Contributing ------------ `auton-survival` is [on GitHub]. Bug reports and pull", "model survival datasets with standard survival (time-to-event) analysis methods. The use of the", "and Preprocessing --------------------------------- Helper functions to load and prerocsss various time-to-event data like", "style=\"align:right;\" src=\"https://ndownloader.figshare.com/files/34052981\" width=30%> <br> The `auton-survival` Package --------------------------- The python package `auton-survival` is", "The use of the wrapper allows a simple standard interface for multiple different", "model = models.cph.DeepCoxPH(layers=[100]) model.fit(features, outcomes.time, outcomes.event) # Predict risk at specific time horizons.", "from auton_survival.experiments import SurvivalRegressionCV # Define the Hyperparameter grid to perform Cross Validation", "Component Analysis (PCA) to 8 dimensions. 
dim_red_method, = 'pca', 8 # We use", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN", "and ML these scenarious are modelled as regression to estimate the conditional survival", "follow up. Survival Regression ------------------- #### `auton_survival.models` Training a Deep Cox Proportional Hazards", "ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "SUPPORT Dataset outcomes, features = datasets.load_dataset(\"SUPPORT\") # Preprocess (Impute and Scale) the features", "{<NAME> <NAME> <NAME>}, title = {auton-survival: an Open-Source Package for Regression, Counterfactual Estimation,", "Yadlowsky, <NAME> Rostamzadeh, Negar and <NAME>}, booktitle={Machine Learning for Healthcare Conference}, pages={674--708}, year={2021},", "provides a flexible API to perform imputation and data normalization for downstream machine", "**see**: <h3>• <a href=\"https://www.cs.cmu.edu/~chiragn/papers/auton_survival.pdf\">White Paper</a> &nbsp;&nbsp; • <a href=\"https://autonlab.github.io/auton-survival/\">Documentation</a> &nbsp;&nbsp; • <a href=\"https://nbviewer.org/github/autonlab/auton-survival/tree/master/examples/\">Demo", "``` [3] [Deep Cox Mixtures for Survival Regression. Conference on Machine Learning for", "with Censored Time-to-Events (2022)](https://arxiv.org/abs/2202.11089)</a> ``` @article{nagpal2022counterfactual, title={Counterfactual Phenotyping with Censored Time-to-Events}, author={<NAME> <NAME>", "title = {auton-survival: an Open-Source Package for Regression, Counterfactual Estimation, Evaluation and Phenotyping", "data like the popular `SUPPORT`, `FRAMINGHAM` and `PBC` dataset for survival analysis. ####", "standard survival (time-to-event) analysis methods. The use of the wrapper allows a simple", "presence of censoring ie. a large number of instances of data are lost", "context of this package, we refer to this task as **phenotyping**. 
`auton_survival.phenotyping` allows:", "the following conditions: The above copyright notice and this permission notice shall be", "compared to typical regression problems, Survival Analysis differs in two major ways: *", "(GMM) with 3 components and diagonal covariance. clustering_method, n_clusters = 'gmm', 3 #", "Phenotyping**: Involves learning phenotypes that demonstrate heterogenous treatment effects. That is, the learnt", "import ClusteringPhenotyper # Dimensionality reduction using Principal Component Analysis (PCA) to 8 dimensions.", "------------------------- #### `auton_survival.metrics` Helper functions to generate standard reports for common Survival Analysis", "module provides a wrapper `auton_survival.estimators.SurvivalModel` to model survival datasets with standard survival (time-to-event)", "for common Survival Analysis tasks. Citing and References ---------------------- Please cite the following", "dataset preprocessing, regression, counterfactual estimation, clustering and phenotyping and propensity adjusted evaluation. **For", "Survival Regression and Representation Learning for Censored Data with Competing Risks.\" IEEE Journal", "------------------- #### `auton_survival.models` Training a Deep Cox Proportional Hazards Model with `auton-survival` ```python", "perform standard survival analysis experiments. This module provides a top-level interface to run", "t=[8, 12, 16]) ``` #### `auton_survival.estimators` This module provides a wrapper `auton_survival.estimators.SurvivalModel` to", "popular `SUPPORT`, `FRAMINGHAM` and `PBC` dataset for survival analysis. #### `auton_survival.datasets` ```python #", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.", "`python` 3.5+ and `pytorch` 1.1+. 
To evaluate performance using standard metrics `auton-survival` requires", "details on** `auton-survival` **see**: <h3>• <a href=\"https://www.cs.cmu.edu/~chiragn/papers/auton_survival.pdf\">White Paper</a> &nbsp;&nbsp; • <a href=\"https://autonlab.github.io/auton-survival/\">Documentation</a> &nbsp;&nbsp;", "included in all copies or substantial portions of the Software. THE SOFTWARE IS", "features = preprocessing.Preprocessor().fit_transform(features) # Train a Deep Cox Proportional Hazards (DCPH) model model", "# The `cat_feats` and `num_feats` lists would contain all the categorical and #", "you employ them in your research: [1] [Deep Survival Machines: Fully Parametric Survival", "```python from auton_survival import datasets, preprocessing, models # Load the SUPPORT Dataset outcomes,", "from the following papers. Please cite the individual papers if you employ them", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,", "differs in two major ways: * The Event distribution, \\( T \\) has", "(DCPH) model model = models.cph.DeepCoxPH(layers=[100]) model.fit(features, outcomes.time, outcomes.event) # Predict risk at specific", "in `auton_survival` come from the following papers. Please cite the individual papers if", "NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "Regression with Time-Varying Covariates}, author={<NAME> <NAME> <NAME>}, booktitle={Proceedings of AAAI Spring Symposium on", "Symposium (2021)](http://proceedings.mlr.press/v146/nagpal21a.html)</a> ``` @InProceedings{pmlr-v146-nagpal21a, title={Deep Parametric Time-to-Event Regression with Time-Varying Covariates}, author={<NAME> <NAME>", "= {'n_estimators' : [50, 100], 'max_depth' : [3, 5], 'max_features' : ['sqrt', 'log2']}", "simple standard interface for multiple different survival regression methods. `auton_survival.estimators` also provides convenient", "latent variable model. 
Dataset Loading and Preprocessing --------------------------------- Helper functions to load and", "\"Software\"), to deal in the Software without restriction, including without limitation the rights", "deal in the Software without restriction, including without limitation the rights to use,", "a wrapper `auton_survival.estimators.SurvivalModel` to model survival datasets with standard survival (time-to-event) analysis methods.", "Symposium on Survival Prediction - Algorithms, Challenges, and Applications 2021}, series={Proceedings of Machine", "AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "Phenotyping**: Involves first performing dimensionality reduction on the inpute covariates \\( x \\)", "survival regression}, author={<NAME> Yadlowsky, <NAME> Rostamzadeh, Negar and <NAME>}, booktitle={Machine Learning for Healthcare", "**Factual Phenotyping**: Involves the use of structured latent variable models, `auton_survival.models.dcm.DeepCoxMixtures` or `auton_survival.models.dsm.DeepSurvivalMachines`", "module has 3 distinct classes, `Scaler`, `Imputer` and `Preprocessor`. The `Preprocessor` class is", "convenient wrappers around other popular python survival analysis packages to experiment with Random", "`pytorch` 1.1+. To evaluate performance using standard metrics `auton-survival` requires `scikit-survival`. Contributing ------------", "This module provides a top-level interface to run `auton-survival` style experiments of survival", "outcomes.event) # Predict risk at specific time horizons. predictions = model.predict_risk(features, t=[8, 12,", "the following papers. Please cite the individual papers if you employ them in", "effects. That is, the learnt phenogroups have differential response to a specific intervention.", "to perform standard survival analysis experiments. 
This module provides a top-level interface to", "Phenotyping with Censored Time-to-Events (2022)](https://arxiv.org/abs/2202.11089)</a> ``` @article{nagpal2022counterfactual, title={Counterfactual Phenotyping with Censored Time-to-Events}, author={<NAME>", "for survival analysis. #### `auton_survival.datasets` ```python # Load the SUPPORT Dataset from auton_survival", "https://github.com/autonlab/auton-survival License ------- MIT License Copyright (c) 2022 Car<NAME> University, [Auton Lab](http://autonlab.org) Permission", "outcomes. In the context of this package, we refer to this task as", "arXiv:2202.11089}, year={2022} } ``` ## Installation ```console foo@bar:~$ git clone https://github.com/autonlab/auton_survival foo@bar:~$ pip", "Lab](http://autonlab.org) Permission is hereby granted, free of charge, to any person obtaining a", "so, subject to the following conditions: The above copyright notice and this permission", "auton-survival Style Cross Validation Experiment. from auton_survival.experiments import SurvivalRegressionCV # Define the Hyperparameter", "Additionally, models and methods in `auton_survival` come from the following papers. Please cite", "to load and prerocsss various time-to-event data like the popular `SUPPORT`, `FRAMINGHAM` and", "Train a RSF model with cross-validation using the SurvivalRegressionCV class model = SurvivalRegressionCV(model='rsf',", "with Censored Time-to-Event Data}, publisher = {arXiv}, year = {2022}, } ``` Additionally,", "[Deep Parametric Time-to-Event Regression with Time-Varying Covariates. AAAI Spring Symposium (2021)](http://proceedings.mlr.press/v146/nagpal21a.html)</a> ``` @InProceedings{pmlr-v146-nagpal21a,", "author={<NAME> <NAME> <NAME>}, booktitle={Proceedings of AAAI Spring Symposium on Survival Prediction - Algorithms,", "&nbsp;&nbsp; • <a href=\"https://nbviewer.org/github/autonlab/auton-survival/tree/master/examples/\">Demo Notebooks</a></h3> What is Survival Analysis? 
-------------------------- **Survival Analysis** involves", "Time-Varying Covariates. AAAI Spring Symposium (2021)](http://proceedings.mlr.press/v146/nagpal21a.html)</a> ``` @InProceedings{pmlr-v146-nagpal21a, title={Deep Parametric Time-to-Event Regression with", "function call. ```python # Preprocessing loaded Datasets from auton_survival import datasets features, outcomes", "Survival Analysis tasks. Citing and References ---------------------- Please cite the following if you", "perform Cross Validation hyperparam_grid = {'n_estimators' : [50, 100], 'max_depth' : [3, 5],", "```python # Load the SUPPORT Dataset from auton_survival import dataset features, outcomes =", "Compatibility ------------- `auton-survival` requires `python` 3.5+ and `pytorch` 1.1+. To evaluate performance using", "python package `auton-survival` is repository of reusable utilities for projects involving censored Time-to-Event", "``` - **Factual Phenotyping**: Involves the use of structured latent variable models, `auton_survival.models.dcm.DeepCoxMixtures`", "`auton-survival` requires `python` 3.5+ and `pytorch` 1.1+. To evaluate performance using standard metrics", "#### `auton_survival.models` Training a Deep Cox Proportional Hazards Model with `auton-survival` ```python from", "papers. Please cite the individual papers if you employ them in your research:", "survival (time-to-event) analysis methods. The use of the wrapper allows a simple standard", "Phenotyping with Censored Time-to-Events}, author={<NAME> <NAME> <NAME> <NAME>}, journal={arXiv preprint arXiv:2202.11089}, year={2022} }", "outcomes, features = datasets.load_dataset(\"SUPPORT\") # Preprocess (Impute and Scale) the features features =", "`Preprocessor`. The `Preprocessor` class is a composite transform that does both Imputing ***and***", "with `auton-survival` ```python from auton_survival import datasets, preprocessing, models # Load the SUPPORT", "and diagonal covariance. 
clustering_method, n_clusters = 'gmm', 3 # Initialize the phenotyper with", "Fully parametric survival regression and representation learning for censored data with competing risks},", "evaluation. **For complete details on** `auton-survival` **see**: <h3>• <a href=\"https://www.cs.cmu.edu/~chiragn/papers/auton_survival.pdf\">White Paper</a> &nbsp;&nbsp; •", "times=[8, 12, 16]) ``` #### `auton_survival.experiments` Modules to perform standard survival analysis experiments.", "Phenotyping with Censored Time-to-Event Data}, publisher = {arXiv}, year = {2022}, } ```", "conditions: The above copyright notice and this permission notice shall be included in", "THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "the phenogroups. phenotypes = phenotyper.fit_phenotype(features) # Plot the phenogroup specific Kaplan-Meier survival estimate.", "Proportional Hazards Model with `auton-survival` ```python from auton_survival import datasets, preprocessing, models #", "two major ways: * The Event distribution, \\( T \\) has positive support", "from auton_survival import datasets features, outcomes = datasets.load_topcat() from auton_survival.preprocessing import Preprocessing features", "model. Dataset Loading and Preprocessing --------------------------------- Helper functions to load and prerocsss various", "<NAME>}, journal={arXiv preprint arXiv:2202.11089}, year={2022} } ``` ## Installation ```console foo@bar:~$ git clone", "preprint arXiv:2202.11089}, year={2022} } ``` ## Installation ```console foo@bar:~$ git clone https://github.com/autonlab/auton_survival foo@bar:~$", "Survival Analysis? 
-------------------------- **Survival Analysis** involves estimating when an event of interest, \\(", "12, 16]) ``` #### `auton_survival.estimators` This module provides a wrapper `auton_survival.estimators.SurvivalModel` to model", "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "allows extraction of latent clusters or subgroups of patients that demonstrate similar outcomes.", "Survival Machines: Fully Parametric Survival Regression and Representation Learning for Censored Data with", "data with competing risks}, author={<NAME> and <NAME> <NAME>}, journal={IEEE Journal of Biomedical and", "horizons. predictions = model.predict_risk(features, t=[8, 12, 16]) ``` #### `auton_survival.estimators` This module provides", "Hazards Model with `auton-survival` ```python from auton_survival import datasets, preprocessing, models # Load", "[50, 100], 'max_depth' : [3, 5], 'max_features' : ['sqrt', 'log2']} # Train a", "censored Time-to-Event Data. `auton-survival` provides a flexible APIs allowing rapid experimentation including dataset", "booktitle={Machine Learning for Healthcare Conference}, pages={674--708}, year={2021}, organization={PMLR} } ``` [4] [Counterfactual Phenotyping", "imputation and data normalization for downstream machine learning models. The module has 3", "&nbsp;&nbsp;&nbsp; [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) &nbsp;&nbsp;&nbsp; [![GitHub Repo stars](https://img.shields.io/github/stars/autonlab/auton-survival?style=social)](https://github.com/autonlab/auton-survival) <img align=right style=\"align:right;\" src=\"https://ndownloader.figshare.com/files/34052981\" width=30%> <br>", "} ``` [2] [Deep Parametric Time-to-Event Regression with Time-Varying Covariates. AAAI Spring Symposium", "other popular python survival analysis packages to experiment with Random Survival Forests and", "a specific intervention. 
Relies on the specially designed `auton_survival.models.cmhe.DeepCoxMixturesHeterogenousEffects` latent variable model. Dataset", "the following if you use `auton-survival`: [auton-survival: an Open-Source Package for Regression, Counterfactual", "for Regression, Counterfactual Estimation, Evaluation and Phenotyping with Censored Time-to-Event Data}, publisher =", "Conference}, pages={674--708}, year={2021}, organization={PMLR} } ``` [4] [Counterfactual Phenotyping with Censored Time-to-Events (2022)](https://arxiv.org/abs/2202.11089)</a>", "up. Survival Regression ------------------- #### `auton_survival.models` Training a Deep Cox Proportional Hazards Model", "The python package `auton-survival` is repository of reusable utilities for projects involving censored", "composite transform that does both Imputing ***and*** Scaling with a single function call.", "multiple different survival analysis models ```python # auton-survival Style Cross Validation Experiment. from", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "`auton_survival.experiments` Modules to perform standard survival analysis experiments. This module provides a top-level", "Initialize the phenotyper with the above hyperparameters. phenotyper = ClusteringPhenotyper(clustering_method=clustering_method, dim_red_method=dim_red_method, n_components=n_components, n_clusters=n_clusters)", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT", "Citing and References ---------------------- Please cite the following if you use `auton-survival`: [auton-survival:", "the Software is furnished to do so, subject to the following conditions: The", "``` Additionally, models and methods in `auton_survival` come from the following papers. 
Please", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING", "class is a composite transform that does both Imputing ***and*** Scaling with a", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT", "welcome. [on GitHub]: https://github.com/autonlab/auton-survival License ------- MIT License Copyright (c) 2022 Car<NAME> University,", "learnt phenogroups have differential response to a specific intervention. Relies on the specially", "propensity adjusted evaluation. **For complete details on** `auton-survival` **see**: <h3>• <a href=\"https://www.cs.cmu.edu/~chiragn/papers/auton_survival.pdf\">White Paper</a>", "Cox Proportional Hazards (DCPH) model model = models.cph.DeepCoxPH(layers=[100]) model.fit(features, outcomes.time, outcomes.event) # Predict", "modelled as regression to estimate the conditional survival distribution, \\( \\mathbb{P}(T>t|X) \\). As", "`Preprocessor` class is a composite transform that does both Imputing ***and*** Scaling with", "dataset features, outcomes = datasets.load_dataset('SUPPORT') ``` #### `auton_survival.preprocessing` This module provides a flexible", "Define the Hyperparameter grid to perform Cross Validation hyperparam_grid = {'n_estimators' : [50,", "Kaplan-Meier survival estimate. 
auton_survival.reporting.plot_kaplanmeier(outcomes, phenotypes) ``` - **Factual Phenotyping**: Involves the use of", "OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR", "\"0.1.0\" from .models.dsm import DeepSurvivalMachines from .models.dcm import DeepCoxMixtures from .models.cph import DeepCoxPH,", "Evaluation and Reporting ------------------------- #### `auton_survival.metrics` Helper functions to generate standard reports for", "to the following conditions: The above copyright notice and this permission notice shall", "Software, and to permit persons to whom the Software is furnished to do", "#### `auton_survival.preprocessing` This module provides a flexible API to perform imputation and data", "Contributing ------------ `auton-survival` is [on GitHub]. Bug reports and pull requests are welcome.", "\\infty) \\). * There is presence of censoring ie. a large number of", "transform that does both Imputing ***and*** Scaling with a single function call. ```python", "# Define the Hyperparameter grid to perform Cross Validation hyperparam_grid = {'n_estimators' :", "numerical features in the dataset. ``` Evaluation and Reporting ------------------------- #### `auton_survival.metrics` Helper", "APIs allowing rapid experimentation including dataset preprocessing, regression, counterfactual estimation, clustering and phenotyping", "MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED", "Biomedical and Health Informatics}, volume={25}, number={8}, pages={3163--3175}, year={2021}, publisher={IEEE} } ``` [2] [Deep", "auton_survival.experiments import SurvivalRegressionCV # Define the Hyperparameter grid to perform Cross Validation hyperparam_grid", "lists would contain all the categorical and # numerical features in the dataset.", "(PCA) to 8 dimensions. 
dim_red_method, = 'pca', 8 # We use a Gaussian", "and methods in `auton_survival` come from the following papers. Please cite the individual", "Deep Survival Machines model using the SurvivalModel class. model = estimators.SurvivalModel(model='dsm') model.fit(features, outcomes)", "to follow up. Survival Regression ------------------- #### `auton_survival.models` Training a Deep Cox Proportional", "a large number of instances of data are lost to follow up. Survival", "Algorithms, Challenges, and Applications 2021}, series={Proceedings of Machine Learning Research}, publisher={PMLR}, } ```", "outcomes) # Predict risk at time horizons. predictions = model.predict_risk(features, times=[8, 12, 16])", "for Healthcare (2021)](https://arxiv.org/abs/2101.06536)</a> ``` @inproceedings{nagpal2021dcm, title={Deep Cox mixtures for survival regression}, author={<NAME> Yadlowsky,", "of this package, we refer to this task as **phenotyping**. `auton_survival.phenotyping` allows: -", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. <img", "CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "generate standard reports for common Survival Analysis tasks. Citing and References ---------------------- Please", "person obtaining a copy of this software and associated documentation files (the \"Software\"),", "# Preprocessing loaded Datasets from auton_survival import datasets features, outcomes = datasets.load_topcat() from", "Phenotyping and Knowledge Discovery ----------------------------------- #### `auton_survival.phenotyping` `auton_survival.phenotyping` allows extraction of latent clusters", "the context of this package, we refer to this task as **phenotyping**. `auton_survival.phenotyping`", "Censored Time-to-Event Data}, publisher = {arXiv}, year = {2022}, } ``` Additionally, models", "of patients that demonstrate similar outcomes. 
In the context of this package, we", "`auton-survival` ```python from auton_survival import datasets, preprocessing, models # Load the SUPPORT Dataset", "survival machines: Fully parametric survival regression and representation learning for censored data with", "wrappers around other popular python survival analysis packages to experiment with Random Survival", "regression methods. `auton_survival.estimators` also provides convenient wrappers around other popular python survival analysis", "in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED", "is, the learnt phenogroups have differential response to a specific intervention. Relies on", "Forests and Weibull Accelerated Failure Time regression models. ```python from auton_survival import estimators", "modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to", "and Scale) the features features = preprocessing.Preprocessor().fit_transform(features) # Train a Deep Cox Proportional", "height =\"120px\" src=\"https://www.cs.cmu.edu/~chiragn/cmu_logo.jpeg\"> <img align=\"right\" height =\"110px\" src=\"https://www.cs.cmu.edu/~chiragn/auton_logo.png\"> <br><br><br><br><br> ''' __version__ = \"0.1.0\"", "classes, `Scaler`, `Imputer` and `Preprocessor`. 
The `Preprocessor` class is a composite transform that", "Survival Regression ------------------- #### `auton_survival.models` Training a Deep Cox Proportional Hazards Model with", "{2022}, } ``` Additionally, models and methods in `auton_survival` come from the following", "regression}, author={<NAME> Yadlowsky, <NAME> Rostamzadeh, Negar and <NAME>}, booktitle={Machine Learning for Healthcare Conference},", "=\"110px\" src=\"https://www.cs.cmu.edu/~chiragn/auton_logo.png\"> <br><br><br><br><br> ''' __version__ = \"0.1.0\" from .models.dsm import DeepSurvivalMachines from .models.dcm", "<NAME> <NAME>}, title = {auton-survival: an Open-Source Package for Regression, Counterfactual Estimation, Evaluation", "employ them in your research: [1] [Deep Survival Machines: Fully Parametric Survival Regression", "survival rates. - **Counterfactual Phenotyping**: Involves learning phenotypes that demonstrate heterogenous treatment effects.", "Preprocessing --------------------------------- Helper functions to load and prerocsss various time-to-event data like the", "`auton_survival.preprocessing` This module provides a flexible API to perform imputation and data normalization", "Please cite the following if you use `auton-survival`: [auton-survival: an Open-Source Package for", "THE SOFTWARE. <img align=\"right\" height =\"120px\" src=\"https://www.cs.cmu.edu/~chiragn/cmu_logo.jpeg\"> <img align=\"right\" height =\"110px\" src=\"https://www.cs.cmu.edu/~chiragn/auton_logo.png\"> <br><br><br><br><br>", "import Preprocessing features = Preprocessor().fit_transform(features, cat_feats=['GENDER', 'ETHNICITY', 'SMOKE'], num_feats=['height', 'weight']) # The `cat_feats`", "= 'gmm', 3 # Initialize the phenotyper with the above hyperparameters. phenotyper =", "@inproceedings{nagpal2021dcm, title={Deep Cox mixtures for survival regression}, author={<NAME> Yadlowsky, <NAME> Rostamzadeh, Negar and", "that demonstrate differential observed survival rates. 
- **Counterfactual Phenotyping**: Involves learning phenotypes that", "are welcome. [on GitHub]: https://github.com/autonlab/auton-survival License ------- MIT License Copyright (c) 2022 Car<NAME>", "of a clustering algorithm on this representation. ```python from auton_survival.phenotyping import ClusteringPhenotyper #", "[Deep Survival Machines: Fully Parametric Survival Regression and Representation Learning for Censored Data", "'gmm', 3 # Initialize the phenotyper with the above hyperparameters. phenotyper = ClusteringPhenotyper(clustering_method=clustering_method,", "The Event distribution, \\( T \\) has positive support ie. \\( T \\in", "lost to follow up. Survival Regression ------------------- #### `auton_survival.models` Training a Deep Cox", "`auton_survival.models.dcm.DeepCoxMixtures` or `auton_survival.models.dsm.DeepSurvivalMachines` to recover phenogroups that demonstrate differential observed survival rates. -", "series={Proceedings of Machine Learning Research}, publisher={PMLR}, } ``` [3] [Deep Cox Mixtures for", "Estimation, Evaluation and Phenotyping with Censored Time-to-Event Data (2022)](https://arxiv.org/abs/2204.07276)</a> ``` @article{nagpal2022autonsurvival, url =", "and representation learning for censored data with competing risks}, author={<NAME> and <NAME> <NAME>},", "# Plot the phenogroup specific Kaplan-Meier survival estimate. auton_survival.reporting.plot_kaplanmeier(outcomes, phenotypes) ``` - **Factual", "censoring ie. a large number of instances of data are lost to follow", "of latent clusters or subgroups of patients that demonstrate similar outcomes. In the", "statistics and ML these scenarious are modelled as regression to estimate the conditional", "\\). * There is presence of censoring ie. 
a large number of instances", "(2021)](http://proceedings.mlr.press/v146/nagpal21a.html)</a> ``` @InProceedings{pmlr-v146-nagpal21a, title={Deep Parametric Time-to-Event Regression with Time-Varying Covariates}, author={<NAME> <NAME> <NAME>},", "outcomes = datasets.load_topcat() from auton_survival.preprocessing import Preprocessing features = Preprocessor().fit_transform(features, cat_feats=['GENDER', 'ETHNICITY', 'SMOKE'],", "\\( \\mathbb{P}(T>t|X) \\). As compared to typical regression problems, Survival Analysis differs in", "- **Counterfactual Phenotyping**: Involves learning phenotypes that demonstrate heterogenous treatment effects. That is,", "8 dimensions. dim_red_method, = 'pca', 8 # We use a Gaussian Mixture Model", "provides convenient wrappers around other popular python survival analysis packages to experiment with", "model.fit(features, outcomes) ``` Phenotyping and Knowledge Discovery ----------------------------------- #### `auton_survival.phenotyping` `auton_survival.phenotyping` allows extraction", ": ['sqrt', 'log2']} # Train a RSF model with cross-validation using the SurvivalRegressionCV", "to any person obtaining a copy of this software and associated documentation files", "import dataset features, outcomes = datasets.load_dataset('SUPPORT') ``` #### `auton_survival.preprocessing` This module provides a", "IEEE Journal of Biomedical and Health Informatics (2021)](https://arxiv.org/abs/2003.01176)</a> ``` @article{nagpal2021dsm, title={Deep survival machines:", "[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) &nbsp;&nbsp;&nbsp; [![GitHub Repo stars](https://img.shields.io/github/stars/autonlab/auton-survival?style=social)](https://github.com/autonlab/auton-survival) <img align=right style=\"align:right;\" src=\"https://ndownloader.figshare.com/files/34052981\" width=30%> <br> The", "Hazards (DCPH) model model = models.cph.DeepCoxPH(layers=[100]) model.fit(features, outcomes.time, 
outcomes.event) # Predict risk at", "stars](https://img.shields.io/github/stars/autonlab/auton-survival?style=social)](https://github.com/autonlab/auton-survival) <img align=right style=\"align:right;\" src=\"https://ndownloader.figshare.com/files/34052981\" width=30%> <br> The `auton-survival` Package --------------------------- The python", "a copy of this software and associated documentation files (the \"Software\"), to deal", "Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "and pull requests are welcome. [on GitHub]: https://github.com/autonlab/auton-survival License ------- MIT License Copyright", "**Survival Analysis** involves estimating when an event of interest, \\( T \\) would", "``` #### `auton_survival.preprocessing` This module provides a flexible API to perform imputation and", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "from auton_survival.phenotyping import ClusteringPhenotyper # Dimensionality reduction using Principal Component Analysis (PCA) to", "# Train a Deep Survival Machines model using the SurvivalModel class. model =", "``` Evaluation and Reporting ------------------------- #### `auton_survival.metrics` Helper functions to generate standard reports", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF", "(2021)](https://arxiv.org/abs/2101.06536)</a> ``` @inproceedings{nagpal2021dcm, title={Deep Cox mixtures for survival regression}, author={<NAME> Yadlowsky, <NAME> Rostamzadeh,", "the phenotyper with the above hyperparameters. 
phenotyper = ClusteringPhenotyper(clustering_method=clustering_method, dim_red_method=dim_red_method, n_components=n_components, n_clusters=n_clusters) #", "and to permit persons to whom the Software is furnished to do so,", "author={<NAME> <NAME> <NAME> <NAME>}, journal={arXiv preprint arXiv:2202.11089}, year={2022} } ``` ## Installation ```console", "[on GitHub]: https://github.com/autonlab/auton-survival License ------- MIT License Copyright (c) 2022 Car<NAME> University, [Auton", "`auton-survival` is repository of reusable utilities for projects involving censored Time-to-Event Data. `auton-survival`", "Biomedical and Health Informatics (2021)](https://arxiv.org/abs/2003.01176)</a> ``` @article{nagpal2021dsm, title={Deep survival machines: Fully parametric survival", "width=30%> <br> The `auton-survival` Package --------------------------- The python package `auton-survival` is repository of", "Style Cross Validation Experiment. from auton_survival.experiments import SurvivalRegressionCV # Define the Hyperparameter grid", "regression models. ```python from auton_survival import estimators # Train a Deep Survival Machines", "covariates \\( X \\). In statistics and ML these scenarious are modelled as", "``` #### `auton_survival.estimators` This module provides a wrapper `auton_survival.estimators.SurvivalModel` to model survival datasets", "[on GitHub]. Bug reports and pull requests are welcome. 
[on GitHub]: https://github.com/autonlab/auton-survival License", "``` @inproceedings{nagpal2021dcm, title={Deep Cox mixtures for survival regression}, author={<NAME> Yadlowsky, <NAME> Rostamzadeh, Negar", "author={<NAME> and <NAME> <NAME>}, journal={IEEE Journal of Biomedical and Health Informatics}, volume={25}, number={8},", "survival analysis, involving cross-validation style experiments with multiple different survival analysis models ```python", "SurvivalRegressionCV(model='rsf', cv_folds=5, hyperparam_grid=hyperparam_grid) model.fit(features, outcomes) ``` Phenotyping and Knowledge Discovery ----------------------------------- #### `auton_survival.phenotyping`", "estimating when an event of interest, \\( T \\) would take places given", "specific intervention. Relies on the specially designed `auton_survival.models.cmhe.DeepCoxMixturesHeterogenousEffects` latent variable model. Dataset Loading", "\\). In statistics and ML these scenarious are modelled as regression to estimate", "data normalization for downstream machine learning models. The module has 3 distinct classes,", "Survival Analysis differs in two major ways: * The Event distribution, \\( T", "= phenotyper.fit_phenotype(features) # Plot the phenogroup specific Kaplan-Meier survival estimate. auton_survival.reporting.plot_kaplanmeier(outcomes, phenotypes) ```", "outcomes) ``` Phenotyping and Knowledge Discovery ----------------------------------- #### `auton_survival.phenotyping` `auton_survival.phenotyping` allows extraction of", "Failure Time regression models. ```python from auton_survival import estimators # Train a Deep", "<br><br><br><br><br> ''' __version__ = \"0.1.0\" from .models.dsm import DeepSurvivalMachines from .models.dcm import DeepCoxMixtures", "Notebooks</a></h3> What is Survival Analysis? 
-------------------------- **Survival Analysis** involves estimating when an event", "for Regression, Counterfactual Estimation, Evaluation and Phenotyping with Censored Time-to-Event Data (2022)](https://arxiv.org/abs/2204.07276)</a> ```", "estimators.SurvivalModel(model='dsm') model.fit(features, outcomes) # Predict risk at time horizons. predictions = model.predict_risk(features, times=[8,", "Censored Data with Competing Risks.\" IEEE Journal of Biomedical and Health Informatics (2021)](https://arxiv.org/abs/2003.01176)</a>", "## Installation ```console foo@bar:~$ git clone https://github.com/autonlab/auton_survival foo@bar:~$ pip install -r requirements.txt ```", "copy of this software and associated documentation files (the \"Software\"), to deal in", "demonstrate differential observed survival rates. - **Counterfactual Phenotyping**: Involves learning phenotypes that demonstrate", "Datasets from auton_survival import datasets features, outcomes = datasets.load_topcat() from auton_survival.preprocessing import Preprocessing", "# Preprocess (Impute and Scale) the features features = preprocessing.Preprocessor().fit_transform(features) # Train a", "Learning for Healthcare (2021)](https://arxiv.org/abs/2101.06536)</a> ``` @inproceedings{nagpal2021dcm, title={Deep Cox mixtures for survival regression}, author={<NAME>", "to experiment with Random Survival Forests and Weibull Accelerated Failure Time regression models.", "[Auton Lab](http://autonlab.org) Permission is hereby granted, free of charge, to any person obtaining", "dataset. ``` Evaluation and Reporting ------------------------- #### `auton_survival.metrics` Helper functions to generate standard", "OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "estimation, clustering and phenotyping and propensity adjusted evaluation. 
**For complete details on** `auton-survival`", "\\( T \\) would take places given some features or covariates \\( X", "by the use of a clustering algorithm on this representation. ```python from auton_survival.phenotyping", "(2022)](https://arxiv.org/abs/2204.07276)</a> ``` @article{nagpal2022autonsurvival, url = {https://arxiv.org/abs/2204.07276}, author = {<NAME> <NAME> <NAME>}, title =", "@InProceedings{pmlr-v146-nagpal21a, title={Deep Parametric Time-to-Event Regression with Time-Varying Covariates}, author={<NAME> <NAME> <NAME>}, booktitle={Proceedings of", "= datasets.load_topcat() from auton_survival.preprocessing import Preprocessing features = Preprocessor().fit_transform(features, cat_feats=['GENDER', 'ETHNICITY', 'SMOKE'], num_feats=['height',", "phenogroups. phenotypes = phenotyper.fit_phenotype(features) # Plot the phenogroup specific Kaplan-Meier survival estimate. auton_survival.reporting.plot_kaplanmeier(outcomes,", "Competing Risks.\" IEEE Journal of Biomedical and Health Informatics (2021)](https://arxiv.org/abs/2003.01176)</a> ``` @article{nagpal2021dsm, title={Deep", "hyperparam_grid=hyperparam_grid) model.fit(features, outcomes) ``` Phenotyping and Knowledge Discovery ----------------------------------- #### `auton_survival.phenotyping` `auton_survival.phenotyping` allows", "(2022)](https://arxiv.org/abs/2202.11089)</a> ``` @article{nagpal2022counterfactual, title={Counterfactual Phenotyping with Censored Time-to-Events}, author={<NAME> <NAME> <NAME> <NAME>}, journal={arXiv", "**Unsupervised Phenotyping**: Involves first performing dimensionality reduction on the inpute covariates \\( x", "all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS", "SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "survival analysis experiments. This module provides a top-level interface to run `auton-survival` style", "does both Imputing ***and*** Scaling with a single function call. 
```python # Preprocessing", "repository of reusable utilities for projects involving censored Time-to-Event Data. `auton-survival` provides a", "title={Deep survival machines: Fully parametric survival regression and representation learning for censored data", "on this representation. ```python from auton_survival.phenotyping import ClusteringPhenotyper # Dimensionality reduction using Principal", "Dataset from auton_survival import dataset features, outcomes = datasets.load_dataset('SUPPORT') ``` #### `auton_survival.preprocessing` This", "packages to experiment with Random Survival Forests and Weibull Accelerated Failure Time regression", "volume={25}, number={8}, pages={3163--3175}, year={2021}, publisher={IEEE} } ``` [2] [Deep Parametric Time-to-Event Regression with", "with multiple different survival analysis models ```python # auton-survival Style Cross Validation Experiment.", "Applications 2021}, series={Proceedings of Machine Learning Research}, publisher={PMLR}, } ``` [3] [Deep Cox", "and References ---------------------- Please cite the following if you use `auton-survival`: [auton-survival: an", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the", "of instances of data are lost to follow up. Survival Regression ------------------- ####", "standard interface for multiple different survival regression methods. `auton_survival.estimators` also provides convenient wrappers", "task as **phenotyping**. `auton_survival.phenotyping` allows: - **Unsupervised Phenotyping**: Involves first performing dimensionality reduction", "representation learning for censored data with competing risks}, author={<NAME> and <NAME> <NAME>}, journal={IEEE", "`auton_survival.models.cmhe.DeepCoxMixturesHeterogenousEffects` latent variable model. Dataset Loading and Preprocessing --------------------------------- Helper functions to load", "publisher={IEEE} } ``` [2] [Deep Parametric Time-to-Event Regression with Time-Varying Covariates. 
AAAI Spring", "has positive support ie. \\( T \\in [0, \\infty) \\). * There is", "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "DEALINGS IN THE SOFTWARE. <img align=\"right\" height =\"120px\" src=\"https://www.cs.cmu.edu/~chiragn/cmu_logo.jpeg\"> <img align=\"right\" height =\"110px\"", "features, outcomes = datasets.load_dataset('SUPPORT') ``` #### `auton_survival.preprocessing` This module provides a flexible API", "foo@bar:~$ pip install -r requirements.txt ``` Compatibility ------------- `auton-survival` requires `python` 3.5+ and", ".models.dsm import DeepSurvivalMachines from .models.dcm import DeepCoxMixtures from .models.cph import DeepCoxPH, DeepRecurrentCoxPH from", "WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "counterfactual estimation, clustering and phenotyping and propensity adjusted evaluation. **For complete details on**", "`auton_survival.estimators.SurvivalModel` to model survival datasets with standard survival (time-to-event) analysis methods. The use", "pages={674--708}, year={2021}, organization={PMLR} } ``` [4] [Counterfactual Phenotyping with Censored Time-to-Events (2022)](https://arxiv.org/abs/2202.11089)</a> ```", "journal={arXiv preprint arXiv:2202.11089}, year={2022} } ``` ## Installation ```console foo@bar:~$ git clone https://github.com/autonlab/auton_survival", "PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS", "from auton_survival import datasets, preprocessing, models # Load the SUPPORT Dataset outcomes, features", "metrics `auton-survival` requires `scikit-survival`. Contributing ------------ `auton-survival` is [on GitHub]. Bug reports and", "SurvivalModel class. model = estimators.SurvivalModel(model='dsm') model.fit(features, outcomes) # Predict risk at time horizons.", "positive support ie. \\( T \\in [0, \\infty) \\). 
* There is presence", "--------------------------- The python package `auton-survival` is repository of reusable utilities for projects involving", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT", "```console foo@bar:~$ git clone https://github.com/autonlab/auton_survival foo@bar:~$ pip install -r requirements.txt ``` Compatibility -------------", "any person obtaining a copy of this software and associated documentation files (the", "Spring Symposium (2021)](http://proceedings.mlr.press/v146/nagpal21a.html)</a> ``` @InProceedings{pmlr-v146-nagpal21a, title={Deep Parametric Time-to-Event Regression with Time-Varying Covariates}, author={<NAME>", "&nbsp;&nbsp; • <a href=\"https://autonlab.github.io/auton-survival/\">Documentation</a> &nbsp;&nbsp; • <a href=\"https://nbviewer.org/github/autonlab/auton-survival/tree/master/examples/\">Demo Notebooks</a></h3> What is Survival Analysis?", "'weight']) # The `cat_feats` and `num_feats` lists would contain all the categorical and", "differential observed survival rates. - **Counterfactual Phenotyping**: Involves learning phenotypes that demonstrate heterogenous", "Regression, Counterfactual Estimation, Evaluation and Phenotyping with Censored Time-to-Event Data (2022)](https://arxiv.org/abs/2204.07276)</a> ``` @article{nagpal2022autonsurvival,", "flexible API to perform imputation and data normalization for downstream machine learning models.", "requests are welcome. [on GitHub]: https://github.com/autonlab/auton-survival License ------- MIT License Copyright (c) 2022", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. <img align=\"right\"", "THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "Weibull Accelerated Failure Time regression models. 
```python from auton_survival import estimators # Train", "Preprocessor().fit_transform(features, cat_feats=['GENDER', 'ETHNICITY', 'SMOKE'], num_feats=['height', 'weight']) # The `cat_feats` and `num_feats` lists would", "Analysis differs in two major ways: * The Event distribution, \\( T \\)", "the Software, and to permit persons to whom the Software is furnished to", "clone https://github.com/autonlab/auton_survival foo@bar:~$ pip install -r requirements.txt ``` Compatibility ------------- `auton-survival` requires `python`", "reports and pull requests are welcome. [on GitHub]: https://github.com/autonlab/auton-survival License ------- MIT License", "copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\",", "[auton-survival: an Open-Source Package for Regression, Counterfactual Estimation, Evaluation and Phenotyping with Censored", "models.cph.DeepCoxPH(layers=[100]) model.fit(features, outcomes.time, outcomes.event) # Predict risk at specific time horizons. predictions =", "} ``` Additionally, models and methods in `auton_survival` come from the following papers.", "distribution, \\( \\mathbb{P}(T>t|X) \\). As compared to typical regression problems, Survival Analysis differs", "Discovery ----------------------------------- #### `auton_survival.phenotyping` `auton_survival.phenotyping` allows extraction of latent clusters or subgroups of", "large number of instances of data are lost to follow up. Survival Regression", "and `num_feats` lists would contain all the categorical and # numerical features in", "covariance. clustering_method, n_clusters = 'gmm', 3 # Initialize the phenotyper with the above", "https://github.com/autonlab/auton_survival foo@bar:~$ pip install -r requirements.txt ``` Compatibility ------------- `auton-survival` requires `python` 3.5+", "the use of a clustering algorithm on this representation. 
```python from auton_survival.phenotyping import", "a simple standard interface for multiple different survival regression methods. `auton_survival.estimators` also provides", "regression and representation learning for censored data with competing risks}, author={<NAME> and <NAME>", "12, 16]) ``` #### `auton_survival.experiments` Modules to perform standard survival analysis experiments. This", "with Competing Risks.\" IEEE Journal of Biomedical and Health Informatics (2021)](https://arxiv.org/abs/2003.01176)</a> ``` @article{nagpal2021dsm,", "Dataset outcomes, features = datasets.load_dataset(\"SUPPORT\") # Preprocess (Impute and Scale) the features features", "phenotypes that demonstrate heterogenous treatment effects. That is, the learnt phenogroups have differential", "[2] [Deep Parametric Time-to-Event Regression with Time-Varying Covariates. AAAI Spring Symposium (2021)](http://proceedings.mlr.press/v146/nagpal21a.html)</a> ```", "Scaling with a single function call. ```python # Preprocessing loaded Datasets from auton_survival", "Repo stars](https://img.shields.io/github/stars/autonlab/auton-survival?style=social)](https://github.com/autonlab/auton-survival) <img align=right style=\"align:right;\" src=\"https://ndownloader.figshare.com/files/34052981\" width=30%> <br> The `auton-survival` Package --------------------------- The", "preprocessing.Preprocessor().fit_transform(features) # Train a Deep Cox Proportional Hazards (DCPH) model model = models.cph.DeepCoxPH(layers=[100])", "Time-to-Events (2022)](https://arxiv.org/abs/2202.11089)</a> ``` @article{nagpal2022counterfactual, title={Counterfactual Phenotyping with Censored Time-to-Events}, author={<NAME> <NAME> <NAME> <NAME>},", "do so, subject to the following conditions: The above copyright notice and this", "the SUPPORT Dataset outcomes, features = datasets.load_dataset(\"SUPPORT\") # Preprocess (Impute and Scale) the", "would contain all the categorical and # numerical features in the dataset. 
```", "Modules to perform standard survival analysis experiments. This module provides a top-level interface", "Time-to-Event Data. `auton-survival` provides a flexible APIs allowing rapid experimentation including dataset preprocessing,", "Journal of Biomedical and Health Informatics (2021)](https://arxiv.org/abs/2003.01176)</a> ``` @article{nagpal2021dsm, title={Deep survival machines: Fully", "allowing rapid experimentation including dataset preprocessing, regression, counterfactual estimation, clustering and phenotyping and", "3 components and diagonal covariance. clustering_method, n_clusters = 'gmm', 3 # Initialize the", "Deep Cox Proportional Hazards Model with `auton-survival` ```python from auton_survival import datasets, preprocessing,", "is furnished to do so, subject to the following conditions: The above copyright", "# Train a RSF model with cross-validation using the SurvivalRegressionCV class model =", "time horizons. predictions = model.predict_risk(features, times=[8, 12, 16]) ``` #### `auton_survival.experiments` Modules to", "model model = models.cph.DeepCoxPH(layers=[100]) model.fit(features, outcomes.time, outcomes.event) # Predict risk at specific time", "Evaluation and Phenotyping with Censored Time-to-Event Data (2022)](https://arxiv.org/abs/2204.07276)</a> ``` @article{nagpal2022autonsurvival, url = {https://arxiv.org/abs/2204.07276},", "<NAME>}, journal={IEEE Journal of Biomedical and Health Informatics}, volume={25}, number={8}, pages={3163--3175}, year={2021}, publisher={IEEE}", "# auton-survival Style Cross Validation Experiment. from auton_survival.experiments import SurvivalRegressionCV # Define the", "learning for censored data with competing risks}, author={<NAME> and <NAME> <NAME>}, journal={IEEE Journal", "IN THE SOFTWARE. 
<img align=\"right\" height =\"120px\" src=\"https://www.cs.cmu.edu/~chiragn/cmu_logo.jpeg\"> <img align=\"right\" height =\"110px\" src=\"https://www.cs.cmu.edu/~chiragn/auton_logo.png\">", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR", "class model = SurvivalRegressionCV(model='rsf', cv_folds=5, hyperparam_grid=hyperparam_grid) model.fit(features, outcomes) ``` Phenotyping and Knowledge Discovery", "predictions = model.predict_risk(features, t=[8, 12, 16]) ``` #### `auton_survival.estimators` This module provides a", "and Health Informatics}, volume={25}, number={8}, pages={3163--3175}, year={2021}, publisher={IEEE} } ``` [2] [Deep Parametric", "(the \"Software\"), to deal in the Software without restriction, including without limitation the", "the individual papers if you employ them in your research: [1] [Deep Survival", "pages={3163--3175}, year={2021}, publisher={IEEE} } ``` [2] [Deep Parametric Time-to-Event Regression with Time-Varying Covariates.", "Regression, Counterfactual Estimation, Evaluation and Phenotyping with Censored Time-to-Event Data}, publisher = {arXiv},", "to permit persons to whom the Software is furnished to do so, subject", "Paper</a> &nbsp;&nbsp; • <a href=\"https://autonlab.github.io/auton-survival/\">Documentation</a> &nbsp;&nbsp; • <a href=\"https://nbviewer.org/github/autonlab/auton-survival/tree/master/examples/\">Demo Notebooks</a></h3> What is Survival", "OR OTHER DEALINGS IN THE SOFTWARE. <img align=\"right\" height =\"120px\" src=\"https://www.cs.cmu.edu/~chiragn/cmu_logo.jpeg\"> <img align=\"right\"", "MIT License Copyright (c) 2022 Car<NAME> University, [Auton Lab](http://autonlab.org) Permission is hereby granted,", "`SUPPORT`, `FRAMINGHAM` and `PBC` dataset for survival analysis. #### `auton_survival.datasets` ```python # Load", "take places given some features or covariates \\( X \\). 
In statistics and", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER", "Permission is hereby granted, free of charge, to any person obtaining a copy", "be included in all copies or substantial portions of the Software. THE SOFTWARE", "X \\). In statistics and ML these scenarious are modelled as regression to", "whom the Software is furnished to do so, subject to the following conditions:", "auton_survival import estimators # Train a Deep Survival Machines model using the SurvivalModel", "popular python survival analysis packages to experiment with Random Survival Forests and Weibull", "Validation hyperparam_grid = {'n_estimators' : [50, 100], 'max_depth' : [3, 5], 'max_features' :", "involving censored Time-to-Event Data. `auton-survival` provides a flexible APIs allowing rapid experimentation including", "that demonstrate heterogenous treatment effects. That is, the learnt phenogroups have differential response", "phenogroups that demonstrate differential observed survival rates. - **Counterfactual Phenotyping**: Involves learning phenotypes", "DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "the categorical and # numerical features in the dataset. ``` Evaluation and Reporting", "phenogroups have differential response to a specific intervention. Relies on the specially designed", "of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "we refer to this task as **phenotyping**. `auton_survival.phenotyping` allows: - **Unsupervised Phenotyping**: Involves", "ie. \\( T \\in [0, \\infty) \\). * There is presence of censoring", "number={8}, pages={3163--3175}, year={2021}, publisher={IEEE} } ``` [2] [Deep Parametric Time-to-Event Regression with Time-Varying", "import estimators # Train a Deep Survival Machines model using the SurvivalModel class.", "Survival Regression. 
Conference on Machine Learning for Healthcare (2021)](https://arxiv.org/abs/2101.06536)</a> ``` @inproceedings{nagpal2021dcm, title={Deep Cox", "#### `auton_survival.metrics` Helper functions to generate standard reports for common Survival Analysis tasks.", "- **Unsupervised Phenotyping**: Involves first performing dimensionality reduction on the inpute covariates \\(", "to a specific intervention. Relies on the specially designed `auton_survival.models.cmhe.DeepCoxMixturesHeterogenousEffects` latent variable model.", "cite the following if you use `auton-survival`: [auton-survival: an Open-Source Package for Regression,", "on the specially designed `auton_survival.models.cmhe.DeepCoxMixturesHeterogenousEffects` latent variable model. Dataset Loading and Preprocessing ---------------------------------", "RSF model with cross-validation using the SurvivalRegressionCV class model = SurvivalRegressionCV(model='rsf', cv_folds=5, hyperparam_grid=hyperparam_grid)", "x \\) followed by the use of a clustering algorithm on this representation.", "experiments. 
This module provides a top-level interface to run `auton-survival` style experiments of", "['sqrt', 'log2']} # Train a RSF model with cross-validation using the SurvivalRegressionCV class", "to typical regression problems, Survival Analysis differs in two major ways: * The", "of structured latent variable models, `auton_survival.models.dcm.DeepCoxMixtures` or `auton_survival.models.dsm.DeepSurvivalMachines` to recover phenogroups that demonstrate", "#### `auton_survival.estimators` This module provides a wrapper `auton_survival.estimators.SurvivalModel` to model survival datasets with", "(c) 2022 Car<NAME> University, [Auton Lab](http://autonlab.org) Permission is hereby granted, free of charge,", "Counterfactual Estimation, Evaluation and Phenotyping with Censored Time-to-Event Data (2022)](https://arxiv.org/abs/2204.07276)</a> ``` @article{nagpal2022autonsurvival, url", "= ClusteringPhenotyper(clustering_method=clustering_method, dim_red_method=dim_red_method, n_components=n_components, n_clusters=n_clusters) # Fit and infer the phenogroups. 
phenotypes =", "} ``` [4] [Counterfactual Phenotyping with Censored Time-to-Events (2022)](https://arxiv.org/abs/2202.11089)</a> ``` @article{nagpal2022counterfactual, title={Counterfactual Phenotyping", "Rostamzadeh, Negar and <NAME>}, booktitle={Machine Learning for Healthcare Conference}, pages={674--708}, year={2021}, organization={PMLR} }", "and associated documentation files (the \"Software\"), to deal in the Software without restriction,", "&nbsp;&nbsp;&nbsp; [![codecov](https://codecov.io/gh/autonlab/DeepSurvivalMachines/branch/master/graph/badge.svg?token=FU1HB5O92D)](https://codecov.io/gh/autonlab/DeepSurvivalMachines) &nbsp;&nbsp;&nbsp; [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) &nbsp;&nbsp;&nbsp; [![GitHub Repo stars](https://img.shields.io/github/stars/autonlab/auton-survival?style=social)](https://github.com/autonlab/auton-survival) <img align=right style=\"align:right;\" src=\"https://ndownloader.figshare.com/files/34052981\"", "without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or", "Cross Validation Experiment. from auton_survival.experiments import SurvivalRegressionCV # Define the Hyperparameter grid to", "the wrapper allows a simple standard interface for multiple different survival regression methods.", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "analysis, involving cross-validation style experiments with multiple different survival analysis models ```python #", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE", "is presence of censoring ie. a large number of instances of data are", "features or covariates \\( X \\). In statistics and ML these scenarious are", "---------------------- Please cite the following if you use `auton-survival`: [auton-survival: an Open-Source Package", "or subgroups of patients that demonstrate similar outcomes. 
In the context of this", "That is, the learnt phenogroups have differential response to a specific intervention. Relies", "ClusteringPhenotyper(clustering_method=clustering_method, dim_red_method=dim_red_method, n_components=n_components, n_clusters=n_clusters) # Fit and infer the phenogroups. phenotypes = phenotyper.fit_phenotype(features)", "OTHER DEALINGS IN THE SOFTWARE. <img align=\"right\" height =\"120px\" src=\"https://www.cs.cmu.edu/~chiragn/cmu_logo.jpeg\"> <img align=\"right\" height", "to recover phenogroups that demonstrate differential observed survival rates. - **Counterfactual Phenotyping**: Involves", "a flexible API to perform imputation and data normalization for downstream machine learning", "is Survival Analysis? -------------------------- **Survival Analysis** involves estimating when an event of interest,", "preprocessing, models # Load the SUPPORT Dataset outcomes, features = datasets.load_dataset(\"SUPPORT\") # Preprocess", "the learnt phenogroups have differential response to a specific intervention. Relies on the", "single function call. ```python # Preprocessing loaded Datasets from auton_survival import datasets features,", "# Predict risk at time horizons. predictions = model.predict_risk(features, times=[8, 12, 16]) ```", "API to perform imputation and data normalization for downstream machine learning models. The", "datasets, preprocessing, models # Load the SUPPORT Dataset outcomes, features = datasets.load_dataset(\"SUPPORT\") #", "a Deep Cox Proportional Hazards (DCPH) model model = models.cph.DeepCoxPH(layers=[100]) model.fit(features, outcomes.time, outcomes.event)", "Copyright (c) 2022 Car<NAME> University, [Auton Lab](http://autonlab.org) Permission is hereby granted, free of", "experiment with Random Survival Forests and Weibull Accelerated Failure Time regression models. 
```python", "datasets.load_dataset('SUPPORT') ``` #### `auton_survival.preprocessing` This module provides a flexible API to perform imputation", "and data normalization for downstream machine learning models. The module has 3 distinct", "following papers. Please cite the individual papers if you employ them in your", "``` [4] [Counterfactual Phenotyping with Censored Time-to-Events (2022)](https://arxiv.org/abs/2202.11089)</a> ``` @article{nagpal2022counterfactual, title={Counterfactual Phenotyping with", "risks}, author={<NAME> and <NAME> <NAME>}, journal={IEEE Journal of Biomedical and Health Informatics}, volume={25},", "------------- `auton-survival` requires `python` 3.5+ and `pytorch` 1.1+. To evaluate performance using standard", "<NAME>}, title = {auton-survival: an Open-Source Package for Regression, Counterfactual Estimation, Evaluation and", "Informatics (2021)](https://arxiv.org/abs/2003.01176)</a> ``` @article{nagpal2021dsm, title={Deep survival machines: Fully parametric survival regression and representation", "`scikit-survival`. Contributing ------------ `auton-survival` is [on GitHub]. Bug reports and pull requests are", "SurvivalRegressionCV # Define the Hyperparameter grid to perform Cross Validation hyperparam_grid = {'n_estimators'", "Load the SUPPORT Dataset outcomes, features = datasets.load_dataset(\"SUPPORT\") # Preprocess (Impute and Scale)", "this representation. ```python from auton_survival.phenotyping import ClusteringPhenotyper # Dimensionality reduction using Principal Component", "install -r requirements.txt ``` Compatibility ------------- `auton-survival` requires `python` 3.5+ and `pytorch` 1.1+.", "with a single function call. 
```python # Preprocessing loaded Datasets from auton_survival import", "with competing risks}, author={<NAME> and <NAME> <NAME>}, journal={IEEE Journal of Biomedical and Health", "Machine Learning Research}, publisher={PMLR}, } ``` [3] [Deep Cox Mixtures for Survival Regression.", "OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "some features or covariates \\( X \\). In statistics and ML these scenarious", "on Machine Learning for Healthcare (2021)](https://arxiv.org/abs/2101.06536)</a> ``` @inproceedings{nagpal2021dcm, title={Deep Cox mixtures for survival", "* The Event distribution, \\( T \\) has positive support ie. \\( T", "Gaussian Mixture Model (GMM) with 3 components and diagonal covariance. clustering_method, n_clusters =", "Journal of Biomedical and Health Informatics}, volume={25}, number={8}, pages={3163--3175}, year={2021}, publisher={IEEE} } ```", "pip install -r requirements.txt ``` Compatibility ------------- `auton-survival` requires `python` 3.5+ and `pytorch`", "this permission notice shall be included in all copies or substantial portions of", "Regression ------------------- #### `auton_survival.models` Training a Deep Cox Proportional Hazards Model with `auton-survival`", "the inpute covariates \\( x \\) followed by the use of a clustering", "Learning for Healthcare Conference}, pages={674--708}, year={2021}, organization={PMLR} } ``` [4] [Counterfactual Phenotyping with", "models # Load the SUPPORT Dataset outcomes, features = datasets.load_dataset(\"SUPPORT\") # Preprocess (Impute", "title={Deep Cox mixtures for survival regression}, author={<NAME> Yadlowsky, <NAME> Rostamzadeh, Negar and <NAME>},", "reduction on the inpute covariates \\( x \\) followed by the use of", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A", "`auton_survival.datasets` ```python # Load the SUPPORT Dataset from auton_survival import dataset features, outcomes", "hyperparameters. 
phenotyper = ClusteringPhenotyper(clustering_method=clustering_method, dim_red_method=dim_red_method, n_components=n_components, n_clusters=n_clusters) # Fit and infer the phenogroups.", "SUPPORT Dataset from auton_survival import dataset features, outcomes = datasets.load_dataset('SUPPORT') ``` #### `auton_survival.preprocessing`", "#### `auton_survival.experiments` Modules to perform standard survival analysis experiments. This module provides a", "Model (GMM) with 3 components and diagonal covariance. clustering_method, n_clusters = 'gmm', 3", "would take places given some features or covariates \\( X \\). In statistics", "use `auton-survival`: [auton-survival: an Open-Source Package for Regression, Counterfactual Estimation, Evaluation and Phenotyping", "phenogroup specific Kaplan-Meier survival estimate. auton_survival.reporting.plot_kaplanmeier(outcomes, phenotypes) ``` - **Factual Phenotyping**: Involves the", "specific Kaplan-Meier survival estimate. auton_survival.reporting.plot_kaplanmeier(outcomes, phenotypes) ``` - **Factual Phenotyping**: Involves the use", "involves estimating when an event of interest, \\( T \\) would take places", "The `Preprocessor` class is a composite transform that does both Imputing ***and*** Scaling", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION", "`auton-survival`: [auton-survival: an Open-Source Package for Regression, Counterfactual Estimation, Evaluation and Phenotyping with", "# Load the SUPPORT Dataset from auton_survival import dataset features, outcomes = datasets.load_dataset('SUPPORT')", "Helper functions to generate standard reports for common Survival Analysis tasks. Citing and", "***and*** Scaling with a single function call. ```python # Preprocessing loaded Datasets from", "Software is furnished to do so, subject to the following conditions: The above", "at specific time horizons. 
predictions = model.predict_risk(features, t=[8, 12, 16]) ``` #### `auton_survival.estimators`", "rapid experimentation including dataset preprocessing, regression, counterfactual estimation, clustering and phenotyping and propensity", "are modelled as regression to estimate the conditional survival distribution, \\( \\mathbb{P}(T>t|X) \\).", "for Healthcare Conference}, pages={674--708}, year={2021}, organization={PMLR} } ``` [4] [Counterfactual Phenotyping with Censored", "align=right style=\"align:right;\" src=\"https://ndownloader.figshare.com/files/34052981\" width=30%> <br> The `auton-survival` Package --------------------------- The python package `auton-survival`", "of Biomedical and Health Informatics (2021)](https://arxiv.org/abs/2003.01176)</a> ``` @article{nagpal2021dsm, title={Deep survival machines: Fully parametric", "datasets with standard survival (time-to-event) analysis methods. The use of the wrapper allows", "python survival analysis packages to experiment with Random Survival Forests and Weibull Accelerated", "`auton-survival` provides a flexible APIs allowing rapid experimentation including dataset preprocessing, regression, counterfactual", "Data with Competing Risks.\" IEEE Journal of Biomedical and Health Informatics (2021)](https://arxiv.org/abs/2003.01176)</a> ```", "documentation files (the \"Software\"), to deal in the Software without restriction, including without", "for Survival Regression. 
Conference on Machine Learning for Healthcare (2021)](https://arxiv.org/abs/2101.06536)</a> ``` @inproceedings{nagpal2021dcm, title={Deep", "files (the \"Software\"), to deal in the Software without restriction, including without limitation", "DeepSurvivalMachines from .models.dcm import DeepCoxMixtures from .models.cph import DeepCoxPH, DeepRecurrentCoxPH from .models.cmhe import", "[![GitHub Repo stars](https://img.shields.io/github/stars/autonlab/auton-survival?style=social)](https://github.com/autonlab/auton-survival) <img align=right style=\"align:right;\" src=\"https://ndownloader.figshare.com/files/34052981\" width=30%> <br> The `auton-survival` Package ---------------------------", "= preprocessing.Preprocessor().fit_transform(features) # Train a Deep Cox Proportional Hazards (DCPH) model model =", "machines: Fully parametric survival regression and representation learning for censored data with competing", "to do so, subject to the following conditions: The above copyright notice and", "\\). As compared to typical regression problems, Survival Analysis differs in two major", "Survival Forests and Weibull Accelerated Failure Time regression models. ```python from auton_survival import", "to perform imputation and data normalization for downstream machine learning models. The module", "'log2']} # Train a RSF model with cross-validation using the SurvivalRegressionCV class model", "the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "href=\"https://autonlab.github.io/auton-survival/\">Documentation</a> &nbsp;&nbsp; • <a href=\"https://nbviewer.org/github/autonlab/auton-survival/tree/master/examples/\">Demo Notebooks</a></h3> What is Survival Analysis? -------------------------- **Survival Analysis**", "Predict risk at specific time horizons. 
predictions = model.predict_risk(features, t=[8, 12, 16]) ```", "and Applications 2021}, series={Proceedings of Machine Learning Research}, publisher={PMLR}, } ``` [3] [Deep", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN", "Parametric Survival Regression and Representation Learning for Censored Data with Competing Risks.\" IEEE", "categorical and # numerical features in the dataset. ``` Evaluation and Reporting -------------------------", "License ------- MIT License Copyright (c) 2022 Car<NAME> University, [Auton Lab](http://autonlab.org) Permission is", "git clone https://github.com/autonlab/auton_survival foo@bar:~$ pip install -r requirements.txt ``` Compatibility ------------- `auton-survival` requires", "in two major ways: * The Event distribution, \\( T \\) has positive", "to perform Cross Validation hyperparam_grid = {'n_estimators' : [50, 100], 'max_depth' : [3,", "src=\"https://www.cs.cmu.edu/~chiragn/cmu_logo.jpeg\"> <img align=\"right\" height =\"110px\" src=\"https://www.cs.cmu.edu/~chiragn/auton_logo.png\"> <br><br><br><br><br> ''' __version__ = \"0.1.0\" from .models.dsm", "the use of structured latent variable models, `auton_survival.models.dcm.DeepCoxMixtures` or `auton_survival.models.dsm.DeepSurvivalMachines` to recover phenogroups", "risk at time horizons. predictions = model.predict_risk(features, times=[8, 12, 16]) ``` #### `auton_survival.experiments`", "University, [Auton Lab](http://autonlab.org) Permission is hereby granted, free of charge, to any person", "or `auton_survival.models.dsm.DeepSurvivalMachines` to recover phenogroups that demonstrate differential observed survival rates. 
- **Counterfactual", "clustering_method, n_clusters = 'gmm', 3 # Initialize the phenotyper with the above hyperparameters.", "Deep Cox Proportional Hazards (DCPH) model model = models.cph.DeepCoxPH(layers=[100]) model.fit(features, outcomes.time, outcomes.event) #", "of Biomedical and Health Informatics}, volume={25}, number={8}, pages={3163--3175}, year={2021}, publisher={IEEE} } ``` [2]", "model = estimators.SurvivalModel(model='dsm') model.fit(features, outcomes) # Predict risk at time horizons. predictions =", "#### `auton_survival.phenotyping` `auton_survival.phenotyping` allows extraction of latent clusters or subgroups of patients that", "that does both Imputing ***and*** Scaling with a single function call. ```python #", "Research}, publisher={PMLR}, } ``` [3] [Deep Cox Mixtures for Survival Regression. Conference on", "Data}, publisher = {arXiv}, year = {2022}, } ``` Additionally, models and methods", "href=\"https://nbviewer.org/github/autonlab/auton-survival/tree/master/examples/\">Demo Notebooks</a></h3> What is Survival Analysis? -------------------------- **Survival Analysis** involves estimating when an", "interest, \\( T \\) would take places given some features or covariates \\(", "a top-level interface to run `auton-survival` style experiments of survival analysis, involving cross-validation", "and Knowledge Discovery ----------------------------------- #### `auton_survival.phenotyping` `auton_survival.phenotyping` allows extraction of latent clusters or", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,", "align=\"right\" height =\"110px\" src=\"https://www.cs.cmu.edu/~chiragn/auton_logo.png\"> <br><br><br><br><br> ''' __version__ = \"0.1.0\" from .models.dsm import DeepSurvivalMachines", "Plot the phenogroup specific Kaplan-Meier survival estimate. 
auton_survival.reporting.plot_kaplanmeier(outcomes, phenotypes) ``` - **Factual Phenotyping**:", "auton_survival import datasets, preprocessing, models # Load the SUPPORT Dataset outcomes, features =", "= SurvivalRegressionCV(model='rsf', cv_folds=5, hyperparam_grid=hyperparam_grid) model.fit(features, outcomes) ``` Phenotyping and Knowledge Discovery ----------------------------------- ####", "regression, counterfactual estimation, clustering and phenotyping and propensity adjusted evaluation. **For complete details", "subgroups of patients that demonstrate similar outcomes. In the context of this package,", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT", "substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "complete details on** `auton-survival` **see**: <h3>• <a href=\"https://www.cs.cmu.edu/~chiragn/papers/auton_survival.pdf\">White Paper</a> &nbsp;&nbsp; • <a href=\"https://autonlab.github.io/auton-survival/\">Documentation</a>", "and propensity adjusted evaluation. **For complete details on** `auton-survival` **see**: <h3>• <a href=\"https://www.cs.cmu.edu/~chiragn/papers/auton_survival.pdf\">White", "journal={IEEE Journal of Biomedical and Health Informatics}, volume={25}, number={8}, pages={3163--3175}, year={2021}, publisher={IEEE} }", "`num_feats` lists would contain all the categorical and # numerical features in the", "TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN", "utilities for projects involving censored Time-to-Event Data. `auton-survival` provides a flexible APIs allowing", "on Survival Prediction - Algorithms, Challenges, and Applications 2021}, series={Proceedings of Machine Learning", "Predict risk at time horizons. predictions = model.predict_risk(features, times=[8, 12, 16]) ``` ####", "reusable utilities for projects involving censored Time-to-Event Data. 
`auton-survival` provides a flexible APIs", "```python from auton_survival.phenotyping import ClusteringPhenotyper # Dimensionality reduction using Principal Component Analysis (PCA)", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR", "time horizons. predictions = model.predict_risk(features, t=[8, 12, 16]) ``` #### `auton_survival.estimators` This module", "**Counterfactual Phenotyping**: Involves learning phenotypes that demonstrate heterogenous treatment effects. That is, the", "# Dimensionality reduction using Principal Component Analysis (PCA) to 8 dimensions. dim_red_method, =", "cv_folds=5, hyperparam_grid=hyperparam_grid) model.fit(features, outcomes) ``` Phenotyping and Knowledge Discovery ----------------------------------- #### `auton_survival.phenotyping` `auton_survival.phenotyping`", "<NAME> <NAME> <NAME>}, journal={arXiv preprint arXiv:2202.11089}, year={2022} } ``` ## Installation ```console foo@bar:~$", "booktitle={Proceedings of AAAI Spring Symposium on Survival Prediction - Algorithms, Challenges, and Applications", "phenotypes) ``` - **Factual Phenotyping**: Involves the use of structured latent variable models,", "of survival analysis, involving cross-validation style experiments with multiple different survival analysis models", "Machines model using the SurvivalModel class. model = estimators.SurvivalModel(model='dsm') model.fit(features, outcomes) # Predict", "Phenotyping with Censored Time-to-Event Data (2022)](https://arxiv.org/abs/2204.07276)</a> ``` @article{nagpal2022autonsurvival, url = {https://arxiv.org/abs/2204.07276}, author =", "as **phenotyping**. 
`auton_survival.phenotyping` allows: - **Unsupervised Phenotyping**: Involves first performing dimensionality reduction on", "auton_survival.preprocessing import Preprocessing features = Preprocessor().fit_transform(features, cat_feats=['GENDER', 'ETHNICITY', 'SMOKE'], num_feats=['height', 'weight']) # The", "reduction using Principal Component Analysis (PCA) to 8 dimensions. dim_red_method, = 'pca', 8", "is a composite transform that does both Imputing ***and*** Scaling with a single", "variable models, `auton_survival.models.dcm.DeepCoxMixtures` or `auton_survival.models.dsm.DeepSurvivalMachines` to recover phenogroups that demonstrate differential observed survival", "} ``` ## Installation ```console foo@bar:~$ git clone https://github.com/autonlab/auton_survival foo@bar:~$ pip install -r", "problems, Survival Analysis differs in two major ways: * The Event distribution, \\(", "scenarious are modelled as regression to estimate the conditional survival distribution, \\( \\mathbb{P}(T>t|X)", "author = {<NAME> <NAME> <NAME>}, title = {auton-survival: an Open-Source Package for Regression,", "followed by the use of a clustering algorithm on this representation. ```python from", "OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "analysis experiments. This module provides a top-level interface to run `auton-survival` style experiments", "with Random Survival Forests and Weibull Accelerated Failure Time regression models. ```python from", "the SUPPORT Dataset from auton_survival import dataset features, outcomes = datasets.load_dataset('SUPPORT') ``` ####", "<a href=\"https://nbviewer.org/github/autonlab/auton-survival/tree/master/examples/\">Demo Notebooks</a></h3> What is Survival Analysis? -------------------------- **Survival Analysis** involves estimating when", "wrapper allows a simple standard interface for multiple different survival regression methods. 
`auton_survival.estimators`", "2022 Car<NAME> University, [Auton Lab](http://autonlab.org) Permission is hereby granted, free of charge, to", "evaluate performance using standard metrics `auton-survival` requires `scikit-survival`. Contributing ------------ `auton-survival` is [on", "Preprocessing features = Preprocessor().fit_transform(features, cat_feats=['GENDER', 'ETHNICITY', 'SMOKE'], num_feats=['height', 'weight']) # The `cat_feats` and", "features = datasets.load_dataset(\"SUPPORT\") # Preprocess (Impute and Scale) the features features = preprocessing.Preprocessor().fit_transform(features)", "`auton_survival.models.dsm.DeepSurvivalMachines` to recover phenogroups that demonstrate differential observed survival rates. - **Counterfactual Phenotyping**:", "} ``` [3] [Deep Cox Mixtures for Survival Regression. Conference on Machine Learning", "given some features or covariates \\( X \\). In statistics and ML these", "<br> The `auton-survival` Package --------------------------- The python package `auton-survival` is repository of reusable", "following if you use `auton-survival`: [auton-survival: an Open-Source Package for Regression, Counterfactual Estimation,", "{auton-survival: an Open-Source Package for Regression, Counterfactual Estimation, Evaluation and Phenotyping with Censored", "survival datasets with standard survival (time-to-event) analysis methods. The use of the wrapper", "permission notice shall be included in all copies or substantial portions of the", "a Deep Survival Machines model using the SurvivalModel class. 
model = estimators.SurvivalModel(model='dsm') model.fit(features,", "[![codecov](https://codecov.io/gh/autonlab/DeepSurvivalMachines/branch/master/graph/badge.svg?token=FU1HB5O92D)](https://codecov.io/gh/autonlab/DeepSurvivalMachines) &nbsp;&nbsp;&nbsp; [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) &nbsp;&nbsp;&nbsp; [![GitHub Repo stars](https://img.shields.io/github/stars/autonlab/auton-survival?style=social)](https://github.com/autonlab/auton-survival) <img align=right style=\"align:right;\" src=\"https://ndownloader.figshare.com/files/34052981\" width=30%>", "allows a simple standard interface for multiple different survival regression methods. `auton_survival.estimators` also", "In the context of this package, we refer to this task as **phenotyping**.", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH", "requires `scikit-survival`. Contributing ------------ `auton-survival` is [on GitHub]. Bug reports and pull requests", "16]) ``` #### `auton_survival.experiments` Modules to perform standard survival analysis experiments. This module", "import DeepSurvivalMachines from .models.dcm import DeepCoxMixtures from .models.cph import DeepCoxPH, DeepRecurrentCoxPH from .models.cmhe", "Challenges, and Applications 2021}, series={Proceedings of Machine Learning Research}, publisher={PMLR}, } ``` [3]", "to model survival datasets with standard survival (time-to-event) analysis methods. The use of", "dim_red_method, = 'pca', 8 # We use a Gaussian Mixture Model (GMM) with", "Representation Learning for Censored Data with Competing Risks.\" IEEE Journal of Biomedical and", "dimensionality reduction on the inpute covariates \\( x \\) followed by the use", "methods. 
The use of the wrapper allows a simple standard interface for multiple", "and Phenotyping with Censored Time-to-Event Data}, publisher = {arXiv}, year = {2022}, }", "copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and", "and # numerical features in the dataset. ``` Evaluation and Reporting ------------------------- ####", "# Predict risk at specific time horizons. predictions = model.predict_risk(features, t=[8, 12, 16])", "GitHub]: https://github.com/autonlab/auton-survival License ------- MIT License Copyright (c) 2022 Car<NAME> University, [Auton Lab](http://autonlab.org)", "typical regression problems, Survival Analysis differs in two major ways: * The Event", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN", "This module provides a wrapper `auton_survival.estimators.SurvivalModel` to model survival datasets with standard survival", "an Open-Source Package for Regression, Counterfactual Estimation, Evaluation and Phenotyping with Censored Time-to-Event", "following conditions: The above copyright notice and this permission notice shall be included", "features = Preprocessor().fit_transform(features, cat_feats=['GENDER', 'ETHNICITY', 'SMOKE'], num_feats=['height', 'weight']) # The `cat_feats` and `num_feats`", "Open-Source Package for Regression, Counterfactual Estimation, Evaluation and Phenotyping with Censored Time-to-Event Data},", ": [3, 5], 'max_features' : ['sqrt', 'log2']} # Train a RSF model with", "Censored Time-to-Events}, author={<NAME> <NAME> <NAME> <NAME>}, journal={arXiv preprint arXiv:2202.11089}, year={2022} } ``` ##", "The above copyright notice and this permission notice shall be included in all", "machine learning models. 
The module has 3 distinct classes, `Scaler`, `Imputer` and `Preprocessor`.", "Installation ```console foo@bar:~$ git clone https://github.com/autonlab/auton_survival foo@bar:~$ pip install -r requirements.txt ``` Compatibility", "using standard metrics `auton-survival` requires `scikit-survival`. Contributing ------------ `auton-survival` is [on GitHub]. Bug", "#### `auton_survival.datasets` ```python # Load the SUPPORT Dataset from auton_survival import dataset features,", "<img align=\"right\" height =\"120px\" src=\"https://www.cs.cmu.edu/~chiragn/cmu_logo.jpeg\"> <img align=\"right\" height =\"110px\" src=\"https://www.cs.cmu.edu/~chiragn/auton_logo.png\"> <br><br><br><br><br> ''' __version__", "and Weibull Accelerated Failure Time regression models. ```python from auton_survival import estimators #", "Loading and Preprocessing --------------------------------- Helper functions to load and prerocsss various time-to-event data", "run `auton-survival` style experiments of survival analysis, involving cross-validation style experiments with multiple", "from auton_survival import estimators # Train a Deep Survival Machines model using the", "granted, free of charge, to any person obtaining a copy of this software", "• <a href=\"https://autonlab.github.io/auton-survival/\">Documentation</a> &nbsp;&nbsp; • <a href=\"https://nbviewer.org/github/autonlab/auton-survival/tree/master/examples/\">Demo Notebooks</a></h3> What is Survival Analysis? 
--------------------------", "limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "height =\"110px\" src=\"https://www.cs.cmu.edu/~chiragn/auton_logo.png\"> <br><br><br><br><br> ''' __version__ = \"0.1.0\" from .models.dsm import DeepSurvivalMachines from", "SurvivalRegressionCV class model = SurvivalRegressionCV(model='rsf', cv_folds=5, hyperparam_grid=hyperparam_grid) model.fit(features, outcomes) ``` Phenotyping and Knowledge", "for survival regression}, author={<NAME> Yadlowsky, <NAME> Rostamzadeh, Negar and <NAME>}, booktitle={Machine Learning for", "for multiple different survival regression methods. `auton_survival.estimators` also provides convenient wrappers around other", "``` @article{nagpal2022counterfactual, title={Counterfactual Phenotyping with Censored Time-to-Events}, author={<NAME> <NAME> <NAME> <NAME>}, journal={arXiv preprint", "title={Deep Parametric Time-to-Event Regression with Time-Varying Covariates}, author={<NAME> <NAME> <NAME>}, booktitle={Proceedings of AAAI", ": [50, 100], 'max_depth' : [3, 5], 'max_features' : ['sqrt', 'log2']} # Train", "ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO", "provides a flexible APIs allowing rapid experimentation including dataset preprocessing, regression, counterfactual estimation,", "8 # We use a Gaussian Mixture Model (GMM) with 3 components and", "------- MIT License Copyright (c) 2022 Car<NAME> University, [Auton Lab](http://autonlab.org) Permission is hereby", "[Deep Cox Mixtures for Survival Regression. Conference on Machine Learning for Healthcare (2021)](https://arxiv.org/abs/2101.06536)</a>", "```python from auton_survival import estimators # Train a Deep Survival Machines model using", "is [on GitHub]. Bug reports and pull requests are welcome. 
[on GitHub]: https://github.com/autonlab/auton-survival", "using Principal Component Analysis (PCA) to 8 dimensions. dim_red_method, = 'pca', 8 #", "`Scaler`, `Imputer` and `Preprocessor`. The `Preprocessor` class is a composite transform that does", "standard reports for common Survival Analysis tasks. Citing and References ---------------------- Please cite", "on** `auton-survival` **see**: <h3>• <a href=\"https://www.cs.cmu.edu/~chiragn/papers/auton_survival.pdf\">White Paper</a> &nbsp;&nbsp; • <a href=\"https://autonlab.github.io/auton-survival/\">Documentation</a> &nbsp;&nbsp; •", "dataset for survival analysis. #### `auton_survival.datasets` ```python # Load the SUPPORT Dataset from", "models ```python # auton-survival Style Cross Validation Experiment. from auton_survival.experiments import SurvivalRegressionCV #", "= model.predict_risk(features, times=[8, 12, 16]) ``` #### `auton_survival.experiments` Modules to perform standard survival", "''' __version__ = \"0.1.0\" from .models.dsm import DeepSurvivalMachines from .models.dcm import DeepCoxMixtures from", "and <NAME>}, booktitle={Machine Learning for Healthcare Conference}, pages={674--708}, year={2021}, organization={PMLR} } ``` [4]", "year = {2022}, } ``` Additionally, models and methods in `auton_survival` come from", "censored data with competing risks}, author={<NAME> and <NAME> <NAME>}, journal={IEEE Journal of Biomedical", "to 8 dimensions. 
dim_red_method, = 'pca', 8 # We use a Gaussian Mixture", "&nbsp;&nbsp;&nbsp; [![GitHub Repo stars](https://img.shields.io/github/stars/autonlab/auton-survival?style=social)](https://github.com/autonlab/auton-survival) <img align=right style=\"align:right;\" src=\"https://ndownloader.figshare.com/files/34052981\" width=30%> <br> The `auton-survival` Package", "for Censored Data with Competing Risks.\" IEEE Journal of Biomedical and Health Informatics", "the SurvivalRegressionCV class model = SurvivalRegressionCV(model='rsf', cv_folds=5, hyperparam_grid=hyperparam_grid) model.fit(features, outcomes) ``` Phenotyping and", "Knowledge Discovery ----------------------------------- #### `auton_survival.phenotyping` `auton_survival.phenotyping` allows extraction of latent clusters or subgroups", "\\( T \\in [0, \\infty) \\). * There is presence of censoring ie.", "and/or sell copies of the Software, and to permit persons to whom the", "clusters or subgroups of patients that demonstrate similar outcomes. In the context of", "components and diagonal covariance. clustering_method, n_clusters = 'gmm', 3 # Initialize the phenotyper", "Regression with Time-Varying Covariates. AAAI Spring Symposium (2021)](http://proceedings.mlr.press/v146/nagpal21a.html)</a> ``` @InProceedings{pmlr-v146-nagpal21a, title={Deep Parametric Time-to-Event", "`auton_survival.estimators` This module provides a wrapper `auton_survival.estimators.SurvivalModel` to model survival datasets with standard", "of charge, to any person obtaining a copy of this software and associated", "To evaluate performance using standard metrics `auton-survival` requires `scikit-survival`. 
Contributing ------------ `auton-survival` is", "= \"0.1.0\" from .models.dsm import DeepSurvivalMachines from .models.dcm import DeepCoxMixtures from .models.cph import", "align=\"right\" height =\"120px\" src=\"https://www.cs.cmu.edu/~chiragn/cmu_logo.jpeg\"> <img align=\"right\" height =\"110px\" src=\"https://www.cs.cmu.edu/~chiragn/auton_logo.png\"> <br><br><br><br><br> ''' __version__ =", "<a href=\"https://www.cs.cmu.edu/~chiragn/papers/auton_survival.pdf\">White Paper</a> &nbsp;&nbsp; • <a href=\"https://autonlab.github.io/auton-survival/\">Documentation</a> &nbsp;&nbsp; • <a href=\"https://nbviewer.org/github/autonlab/auton-survival/tree/master/examples/\">Demo Notebooks</a></h3> What", "of the wrapper allows a simple standard interface for multiple different survival regression", "involving cross-validation style experiments with multiple different survival analysis models ```python # auton-survival", "n_clusters=n_clusters) # Fit and infer the phenogroups. phenotypes = phenotyper.fit_phenotype(features) # Plot the", "cite the individual papers if you employ them in your research: [1] [Deep", "{'n_estimators' : [50, 100], 'max_depth' : [3, 5], 'max_features' : ['sqrt', 'log2']} #", "call. ```python # Preprocessing loaded Datasets from auton_survival import datasets features, outcomes =", "auton_survival.phenotyping import ClusteringPhenotyper # Dimensionality reduction using Principal Component Analysis (PCA) to 8", "'SMOKE'], num_feats=['height', 'weight']) # The `cat_feats` and `num_feats` lists would contain all the", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
<img align=\"right\" height", "# We use a Gaussian Mixture Model (GMM) with 3 components and diagonal", "title={Counterfactual Phenotyping with Censored Time-to-Events}, author={<NAME> <NAME> <NAME> <NAME>}, journal={arXiv preprint arXiv:2202.11089}, year={2022}", "to run `auton-survival` style experiments of survival analysis, involving cross-validation style experiments with", "publisher = {arXiv}, year = {2022}, } ``` Additionally, models and methods in", "differential response to a specific intervention. Relies on the specially designed `auton_survival.models.cmhe.DeepCoxMixturesHeterogenousEffects` latent", "the SurvivalModel class. model = estimators.SurvivalModel(model='dsm') model.fit(features, outcomes) # Predict risk at time", "load and prerocsss various time-to-event data like the popular `SUPPORT`, `FRAMINGHAM` and `PBC`", "of AAAI Spring Symposium on Survival Prediction - Algorithms, Challenges, and Applications 2021},", "* There is presence of censoring ie. a large number of instances of", "infer the phenogroups. phenotypes = phenotyper.fit_phenotype(features) # Plot the phenogroup specific Kaplan-Meier survival", "allows: - **Unsupervised Phenotyping**: Involves first performing dimensionality reduction on the inpute covariates", "portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "of Machine Learning Research}, publisher={PMLR}, } ``` [3] [Deep Cox Mixtures for Survival", "model using the SurvivalModel class. model = estimators.SurvivalModel(model='dsm') model.fit(features, outcomes) # Predict risk", "prerocsss various time-to-event data like the popular `SUPPORT`, `FRAMINGHAM` and `PBC` dataset for", "common Survival Analysis tasks. 
Citing and References ---------------------- Please cite the following if", "survival regression and representation learning for censored data with competing risks}, author={<NAME> and", "from auton_survival import dataset features, outcomes = datasets.load_dataset('SUPPORT') ``` #### `auton_survival.preprocessing` This module", "auton_survival.reporting.plot_kaplanmeier(outcomes, phenotypes) ``` - **Factual Phenotyping**: Involves the use of structured latent variable", "<a href=\"https://autonlab.github.io/auton-survival/\">Documentation</a> &nbsp;&nbsp; • <a href=\"https://nbviewer.org/github/autonlab/auton-survival/tree/master/examples/\">Demo Notebooks</a></h3> What is Survival Analysis? -------------------------- **Survival", "T \\in [0, \\infty) \\). * There is presence of censoring ie. a", "and `PBC` dataset for survival analysis. #### `auton_survival.datasets` ```python # Load the SUPPORT", "downstream machine learning models. The module has 3 distinct classes, `Scaler`, `Imputer` and", "'max_features' : ['sqrt', 'log2']} # Train a RSF model with cross-validation using the", "Analysis** involves estimating when an event of interest, \\( T \\) would take", "Time-to-Event Data}, publisher = {arXiv}, year = {2022}, } ``` Additionally, models and", "above hyperparameters. phenotyper = ClusteringPhenotyper(clustering_method=clustering_method, dim_red_method=dim_red_method, n_components=n_components, n_clusters=n_clusters) # Fit and infer the", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "`auton_survival.phenotyping` `auton_survival.phenotyping` allows extraction of latent clusters or subgroups of patients that demonstrate", "{arXiv}, year = {2022}, } ``` Additionally, models and methods in `auton_survival` come", "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS", "subject to the following conditions: The above copyright notice and this permission notice", "PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE", "= {<NAME> <NAME> <NAME>}, title = {auton-survival: an Open-Source Package for Regression, Counterfactual", "Analysis tasks. Citing and References ---------------------- Please cite the following if you use", "<NAME>}, booktitle={Proceedings of AAAI Spring Symposium on Survival Prediction - Algorithms, Challenges, and", "survival analysis models ```python # auton-survival Style Cross Validation Experiment. from auton_survival.experiments import", "wrapper `auton_survival.estimators.SurvivalModel` to model survival datasets with standard survival (time-to-event) analysis methods. The", "Time-to-Event Regression with Time-Varying Covariates}, author={<NAME> <NAME> <NAME>}, booktitle={Proceedings of AAAI Spring Symposium", "FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "structured latent variable models, `auton_survival.models.dcm.DeepCoxMixtures` or `auton_survival.models.dsm.DeepSurvivalMachines` to recover phenogroups that demonstrate differential", "Machines: Fully Parametric Survival Regression and Representation Learning for Censored Data with Competing", "to generate standard reports for common Survival Analysis tasks. Citing and References ----------------------", "hereby granted, free of charge, to any person obtaining a copy of this", "THE USE OR OTHER DEALINGS IN THE SOFTWARE. <img align=\"right\" height =\"120px\" src=\"https://www.cs.cmu.edu/~chiragn/cmu_logo.jpeg\">", "provides a top-level interface to run `auton-survival` style experiments of survival analysis, involving", "with the above hyperparameters. 
phenotyper = ClusteringPhenotyper(clustering_method=clustering_method, dim_red_method=dim_red_method, n_components=n_components, n_clusters=n_clusters) # Fit and", "2021}, series={Proceedings of Machine Learning Research}, publisher={PMLR}, } ``` [3] [Deep Cox Mixtures", "restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute,", "In statistics and ML these scenarious are modelled as regression to estimate the", "Regression and Representation Learning for Censored Data with Competing Risks.\" IEEE Journal of", "methods. `auton_survival.estimators` also provides convenient wrappers around other popular python survival analysis packages", "with Censored Time-to-Events}, author={<NAME> <NAME> <NAME> <NAME>}, journal={arXiv preprint arXiv:2202.11089}, year={2022} } ```", "and Health Informatics (2021)](https://arxiv.org/abs/2003.01176)</a> ``` @article{nagpal2021dsm, title={Deep survival machines: Fully parametric survival regression", "a single function call. ```python # Preprocessing loaded Datasets from auton_survival import datasets", "at time horizons. predictions = model.predict_risk(features, times=[8, 12, 16]) ``` #### `auton_survival.experiments` Modules", "has 3 distinct classes, `Scaler`, `Imputer` and `Preprocessor`. The `Preprocessor` class is a", "Estimation, Evaluation and Phenotyping with Censored Time-to-Event Data}, publisher = {arXiv}, year =", "AAAI Spring Symposium on Survival Prediction - Algorithms, Challenges, and Applications 2021}, series={Proceedings", "Conference on Machine Learning for Healthcare (2021)](https://arxiv.org/abs/2101.06536)</a> ``` @inproceedings{nagpal2021dcm, title={Deep Cox mixtures for", "recover phenogroups that demonstrate differential observed survival rates. - **Counterfactual Phenotyping**: Involves learning", "class. model = estimators.SurvivalModel(model='dsm') model.fit(features, outcomes) # Predict risk at time horizons. predictions" ]
[ "open(\"outFiles/outcontent.txt\", mode=\"a\", encoding=\"utf-8\") filecontent.write(\"\\n\\n\\n=========={}==========\\n\".format(filename)) filecontent.write(\"\\n\".join(str(el) for el in outcontent)) except IOError as identifier:", "filecontent = open(\"outFiles/outcontent.txt\", mode=\"a\", encoding=\"utf-8\") filecontent.write(\"\\n\\n\\n=========={}==========\\n\".format(filename)) filecontent.write(\"\\n\".join(str(el) for el in outcontent)) except IOError", "mode=\"a\", encoding=\"utf-8\") filecontent.write(\"\\n\\n\\n=========={}==========\\n\".format(filename)) filecontent.write(\"\\n\".join(str(el) for el in outcontent)) except IOError as identifier: print(str(identifier))", "try: filecontent = open(\"outFiles/outcontent.txt\", mode=\"a\", encoding=\"utf-8\") filecontent.write(\"\\n\\n\\n=========={}==========\\n\".format(filename)) filecontent.write(\"\\n\".join(str(el) for el in outcontent)) except", "<filename>src/packModules/filewrite.py def filewrite(outcontent,filename): try: filecontent = open(\"outFiles/outcontent.txt\", mode=\"a\", encoding=\"utf-8\") filecontent.write(\"\\n\\n\\n=========={}==========\\n\".format(filename)) filecontent.write(\"\\n\".join(str(el) for el", "encoding=\"utf-8\") filecontent.write(\"\\n\\n\\n=========={}==========\\n\".format(filename)) filecontent.write(\"\\n\".join(str(el) for el in outcontent)) except IOError as identifier: print(str(identifier)) finally:", "= open(\"outFiles/outcontent.txt\", mode=\"a\", encoding=\"utf-8\") filecontent.write(\"\\n\\n\\n=========={}==========\\n\".format(filename)) filecontent.write(\"\\n\".join(str(el) for el in outcontent)) except IOError as", "filewrite(outcontent,filename): try: filecontent = open(\"outFiles/outcontent.txt\", mode=\"a\", encoding=\"utf-8\") filecontent.write(\"\\n\\n\\n=========={}==========\\n\".format(filename)) filecontent.write(\"\\n\".join(str(el) for el in outcontent))", "def filewrite(outcontent,filename): try: filecontent = 
open(\"outFiles/outcontent.txt\", mode=\"a\", encoding=\"utf-8\") filecontent.write(\"\\n\\n\\n=========={}==========\\n\".format(filename)) filecontent.write(\"\\n\".join(str(el) for el in", "filecontent.write(\"\\n\\n\\n=========={}==========\\n\".format(filename)) filecontent.write(\"\\n\".join(str(el) for el in outcontent)) except IOError as identifier: print(str(identifier)) finally: filecontent.close()" ]
[ "models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name='cards') language = models.ForeignKey(Language, on_delete=models.DO_NOTHING, related_name='cards') times_shown = models.IntegerField(default=0) answered_correctly =", "if not created: grant_achievements(instance) else: grant_achievements(instance.user) def grant_achievements(user): for achievement in Achievement.objects.all(): achievement.try_award_to(user)", "times_shown = models.IntegerField(default=0) answered_correctly = models.IntegerField(default=0) @property def difficulty(self): if self.times_shown is 0:", "if condition_result: user.achievements.add(self) return True else: return False class UserFollowing(models.Model): user = models.ForeignKey(User,", "<= ratio < 0.25: return \"INSANE\" elif 0.25 <= ratio < 0.5: return", "User from django.dispatch import receiver from django.db.models.signals import post_save @receiver(post_save, sender=User) def create_blank_statistics(sender,", "( (\"1\", \"Bronze\"), (\"2\", \"Silver\"), (\"3\", \"Gold\"), (\"4\", \"Diamond\"), ) condition = models.TextField(max_length=2048)", "blank=True) level = models.CharField(max_length=1, choices=LEVEL_CHOICES) score = models.IntegerField() def __str__(self): return str(self.name) def", "= models.ForeignKey(User, on_delete=models.CASCADE, related_name='followed_by') class Statistic(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='statistics') correctly_swiped_taboo_cards =", "2) else: return 0 def __str__(self): return str(self.pk) + ' | ' +", "created=False, **kwargs): if created: Statistic.objects.create(user=instance) class Language(models.Model): name = models.CharField(max_length=32) users = models.ManyToManyField(User,", "related_name='cards') language = models.ForeignKey(Language, on_delete=models.DO_NOTHING, related_name='cards') times_shown = models.IntegerField(default=0) answered_correctly = models.IntegerField(default=0) @property", "models.ForeignKey(Language, 
on_delete=models.DO_NOTHING, related_name='cards') times_shown = models.IntegerField(default=0) answered_correctly = models.IntegerField(default=0) @property def difficulty(self): if", "blank=True) language_code = models.CharField(max_length=32, null=True, blank=True) def __str__(self): return self.name def __eq__(self, other):", "else: return 0 def __str__(self): return str(self.pk) + ' | ' + str(self.key_word)", "created: Statistic.objects.create(user=instance) class Language(models.Model): name = models.CharField(max_length=32) users = models.ManyToManyField(User, related_name='selected_languages', blank=True) language_code", "@receiver(post_save, sender=User) def create_blank_statistics(sender, instance=None, created=False, **kwargs): if created: Statistic.objects.create(user=instance) class Language(models.Model): name", "Fraction from django.db import models from django.contrib.auth.models import User from django.dispatch import receiver", "if created: Statistic.objects.create(user=instance) class Language(models.Model): name = models.CharField(max_length=32) users = models.ManyToManyField(User, related_name='selected_languages', blank=True)", "' + str(self.key_word) + ' | ' + str(self.language.language_code) @receiver(post_save, sender=Statistic) @receiver(post_save, sender=UserFollowing)", "models.IntegerField(default=0) ans_flashcards = models.IntegerField(default=0) translated_words = models.IntegerField(default=0) @property def taboo_efficiency(self): if self.swiped_taboo_cards is", "= models.CharField(max_length=1, choices=LEVEL_CHOICES) score = models.IntegerField() def __str__(self): return str(self.name) def try_award_to(self, user):", "self.times_shown is not 0: return round(Fraction(self.answered_correctly, self.times_shown), 2) else: return 0 def __str__(self):", "owner = models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name='cards') language = models.ForeignKey(Language, on_delete=models.DO_NOTHING, related_name='cards') times_shown = 
models.IntegerField(default=0)", "str(self.language.language_code) @receiver(post_save, sender=Statistic) @receiver(post_save, sender=UserFollowing) @receiver(post_save, sender=User) def trigger_achievements_after_statistics_save(sender, instance=None, created=False, **kwargs): if", "models.TextField(max_length=2048) name = models.CharField(max_length=128) font_awesome_icon = models.TextField(max_length=2048) users = models.ManyToManyField(User, related_name=\"achievements\", blank=True) level", "elif 0.25 <= ratio < 0.5: return \"HARD\" elif 0.5 <= ratio <", "Statistic(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='statistics') correctly_swiped_taboo_cards = models.IntegerField(default=0) swiped_taboo_cards = models.IntegerField(default=0) correctly_ans_flashcards", "0 class TabooCard(models.Model): key_word = models.CharField(max_length=128) black_list = models.CharField(max_length=2048) owner = models.ForeignKey(User, on_delete=models.DO_NOTHING,", "= models.CharField(max_length=32) users = models.ManyToManyField(User, related_name='selected_languages', blank=True) language_code = models.CharField(max_length=32, null=True, blank=True) def", "else: return False class Achievement(models.Model): LEVEL_CHOICES = ( (\"1\", \"Bronze\"), (\"2\", \"Silver\"), (\"3\",", "models.ForeignKey(User, on_delete=models.CASCADE, related_name='following') following = models.ForeignKey(User, on_delete=models.CASCADE, related_name='followed_by') class Statistic(models.Model): user = models.OneToOneField(User,", "return self.name == other.name else: return False class Achievement(models.Model): LEVEL_CHOICES = ( (\"1\",", "models.OneToOneField(User, on_delete=models.CASCADE, related_name='statistics') correctly_swiped_taboo_cards = models.IntegerField(default=0) swiped_taboo_cards = models.IntegerField(default=0) correctly_ans_flashcards = models.IntegerField(default=0) ans_flashcards", "elif 0.5 <= ratio < 0.75: return \"MEDIUM\" elif 0.75 
<= ratio: return", "__str__(self): return str(self.pk) + ' | ' + str(self.key_word) + ' | '", "@receiver(post_save, sender=User) def trigger_achievements_after_statistics_save(sender, instance=None, created=False, **kwargs): if isinstance(instance, User): if not created:", "sender=User) def trigger_achievements_after_statistics_save(sender, instance=None, created=False, **kwargs): if isinstance(instance, User): if not created: grant_achievements(instance)", "not 0: return round(Fraction(self.answered_correctly, self.times_shown), 2) else: return 0 def __str__(self): return str(self.pk)", "\"Diamond\"), ) condition = models.TextField(max_length=2048) name = models.CharField(max_length=128) font_awesome_icon = models.TextField(max_length=2048) users =", "= models.IntegerField(default=0) answered_correctly = models.IntegerField(default=0) @property def difficulty(self): if self.times_shown is 0: return", "0.5: return \"HARD\" elif 0.5 <= ratio < 0.75: return \"MEDIUM\" elif 0.75", "from django.db.models.signals import post_save @receiver(post_save, sender=User) def create_blank_statistics(sender, instance=None, created=False, **kwargs): if created:", "user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='statistics') correctly_swiped_taboo_cards = models.IntegerField(default=0) swiped_taboo_cards = models.IntegerField(default=0) correctly_ans_flashcards =", "= self in user.achievements.all() if has_achievement: return False condition_result = eval(str(self.condition)) if condition_result:", "**kwargs): if isinstance(instance, User): if not created: grant_achievements(instance) else: grant_achievements(instance.user) def grant_achievements(user): for", "= models.ManyToManyField(User, related_name=\"achievements\", blank=True) level = models.CharField(max_length=1, choices=LEVEL_CHOICES) score = models.IntegerField() def __str__(self):", "0: return round(Fraction(self.correctly_swiped_taboo_cards, self.swiped_taboo_cards), 2) else: return 0 class 
TabooCard(models.Model): key_word = models.CharField(max_length=128)", "ratio: return \"EASY\" @property def card_efficiency(self): if self.times_shown is not 0: return round(Fraction(self.answered_correctly,", "**kwargs): if created: Statistic.objects.create(user=instance) class Language(models.Model): name = models.CharField(max_length=32) users = models.ManyToManyField(User, related_name='selected_languages',", "following = models.ForeignKey(User, on_delete=models.CASCADE, related_name='followed_by') class Statistic(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='statistics') correctly_swiped_taboo_cards", "0.25: return \"INSANE\" elif 0.25 <= ratio < 0.5: return \"HARD\" elif 0.5", "= ( (\"1\", \"Bronze\"), (\"2\", \"Silver\"), (\"3\", \"Gold\"), (\"4\", \"Diamond\"), ) condition =", "= models.IntegerField(default=0) translated_words = models.IntegerField(default=0) @property def taboo_efficiency(self): if self.swiped_taboo_cards is not 0:", "= models.IntegerField(default=0) @property def taboo_efficiency(self): if self.swiped_taboo_cards is not 0: return round(Fraction(self.correctly_swiped_taboo_cards, self.swiped_taboo_cards),", "taboo_efficiency(self): if self.swiped_taboo_cards is not 0: return round(Fraction(self.correctly_swiped_taboo_cards, self.swiped_taboo_cards), 2) else: return 0", "return round(Fraction(self.answered_correctly, self.times_shown), 2) else: return 0 def __str__(self): return str(self.pk) + '", "swiped_taboo_cards = models.IntegerField(default=0) correctly_ans_flashcards = models.IntegerField(default=0) ans_flashcards = models.IntegerField(default=0) translated_words = models.IntegerField(default=0) @property", "from fractions import Fraction from django.db import models from django.contrib.auth.models import User from", "class Language(models.Model): name = models.CharField(max_length=32) users = models.ManyToManyField(User, related_name='selected_languages', blank=True) language_code = 
models.CharField(max_length=32,", "False condition_result = eval(str(self.condition)) if condition_result: user.achievements.add(self) return True else: return False class", "is 0: return \"NOT ENOUGH STATS\" ratio = Fraction(self.answered_correctly, self.times_shown) if 0 <=", "if self.times_shown is not 0: return round(Fraction(self.answered_correctly, self.times_shown), 2) else: return 0 def", "null=True, blank=True) def __str__(self): return self.name def __eq__(self, other): if isinstance(other, Language): return", "< 0.25: return \"INSANE\" elif 0.25 <= ratio < 0.5: return \"HARD\" elif", "return str(self.pk) + ' | ' + str(self.key_word) + ' | ' +", "| ' + str(self.language.language_code) @receiver(post_save, sender=Statistic) @receiver(post_save, sender=UserFollowing) @receiver(post_save, sender=User) def trigger_achievements_after_statistics_save(sender, instance=None,", "str(self.name) def try_award_to(self, user): has_achievement = self in user.achievements.all() if has_achievement: return False", "instance=None, created=False, **kwargs): if created: Statistic.objects.create(user=instance) class Language(models.Model): name = models.CharField(max_length=32) users =", "related_name=\"achievements\", blank=True) level = models.CharField(max_length=1, choices=LEVEL_CHOICES) score = models.IntegerField() def __str__(self): return str(self.name)", "False class Achievement(models.Model): LEVEL_CHOICES = ( (\"1\", \"Bronze\"), (\"2\", \"Silver\"), (\"3\", \"Gold\"), (\"4\",", "if 0 <= ratio < 0.25: return \"INSANE\" elif 0.25 <= ratio <", "<= ratio: return \"EASY\" @property def card_efficiency(self): if self.times_shown is not 0: return", "@receiver(post_save, sender=Statistic) @receiver(post_save, sender=UserFollowing) @receiver(post_save, sender=User) def trigger_achievements_after_statistics_save(sender, instance=None, created=False, **kwargs): if isinstance(instance,", "models.CharField(max_length=2048) owner = models.ForeignKey(User, 
on_delete=models.DO_NOTHING, related_name='cards') language = models.ForeignKey(Language, on_delete=models.DO_NOTHING, related_name='cards') times_shown =", "= models.IntegerField(default=0) ans_flashcards = models.IntegerField(default=0) translated_words = models.IntegerField(default=0) @property def taboo_efficiency(self): if self.swiped_taboo_cards", "= models.ManyToManyField(User, related_name='selected_languages', blank=True) language_code = models.CharField(max_length=32, null=True, blank=True) def __str__(self): return self.name", "created=False, **kwargs): if isinstance(instance, User): if not created: grant_achievements(instance) else: grant_achievements(instance.user) def grant_achievements(user):", "Statistic.objects.create(user=instance) class Language(models.Model): name = models.CharField(max_length=32) users = models.ManyToManyField(User, related_name='selected_languages', blank=True) language_code =", "def __eq__(self, other): if isinstance(other, Language): return self.name == other.name else: return False", "sender=UserFollowing) @receiver(post_save, sender=User) def trigger_achievements_after_statistics_save(sender, instance=None, created=False, **kwargs): if isinstance(instance, User): if not", "= models.IntegerField() def __str__(self): return str(self.name) def try_award_to(self, user): has_achievement = self in", "models.IntegerField(default=0) answered_correctly = models.IntegerField(default=0) @property def difficulty(self): if self.times_shown is 0: return \"NOT", "ratio < 0.75: return \"MEDIUM\" elif 0.75 <= ratio: return \"EASY\" @property def", "STATS\" ratio = Fraction(self.answered_correctly, self.times_shown) if 0 <= ratio < 0.25: return \"INSANE\"", "sender=User) def create_blank_statistics(sender, instance=None, created=False, **kwargs): if created: Statistic.objects.create(user=instance) class Language(models.Model): name =", "models.CharField(max_length=1, choices=LEVEL_CHOICES) score = models.IntegerField() def __str__(self): 
return str(self.name) def try_award_to(self, user): has_achievement", "class TabooCard(models.Model): key_word = models.CharField(max_length=128) black_list = models.CharField(max_length=2048) owner = models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name='cards')", "models.CharField(max_length=32, null=True, blank=True) def __str__(self): return self.name def __eq__(self, other): if isinstance(other, Language):", ") condition = models.TextField(max_length=2048) name = models.CharField(max_length=128) font_awesome_icon = models.TextField(max_length=2048) users = models.ManyToManyField(User,", "return self.name def __eq__(self, other): if isinstance(other, Language): return self.name == other.name else:", "import post_save @receiver(post_save, sender=User) def create_blank_statistics(sender, instance=None, created=False, **kwargs): if created: Statistic.objects.create(user=instance) class", "correctly_swiped_taboo_cards = models.IntegerField(default=0) swiped_taboo_cards = models.IntegerField(default=0) correctly_ans_flashcards = models.IntegerField(default=0) ans_flashcards = models.IntegerField(default=0) translated_words", "0: return \"NOT ENOUGH STATS\" ratio = Fraction(self.answered_correctly, self.times_shown) if 0 <= ratio", "= Fraction(self.answered_correctly, self.times_shown) if 0 <= ratio < 0.25: return \"INSANE\" elif 0.25", "name = models.CharField(max_length=32) users = models.ManyToManyField(User, related_name='selected_languages', blank=True) language_code = models.CharField(max_length=32, null=True, blank=True)", "return 0 def __str__(self): return str(self.pk) + ' | ' + str(self.key_word) +", "def create_blank_statistics(sender, instance=None, created=False, **kwargs): if created: Statistic.objects.create(user=instance) class Language(models.Model): name = models.CharField(max_length=32)", "self in user.achievements.all() if has_achievement: return False condition_result = eval(str(self.condition)) if condition_result: 
user.achievements.add(self)", "= models.TextField(max_length=2048) users = models.ManyToManyField(User, related_name=\"achievements\", blank=True) level = models.CharField(max_length=1, choices=LEVEL_CHOICES) score =", "@property def difficulty(self): if self.times_shown is 0: return \"NOT ENOUGH STATS\" ratio =", "User): if not created: grant_achievements(instance) else: grant_achievements(instance.user) def grant_achievements(user): for achievement in Achievement.objects.all():", "(\"4\", \"Diamond\"), ) condition = models.TextField(max_length=2048) name = models.CharField(max_length=128) font_awesome_icon = models.TextField(max_length=2048) users", "post_save @receiver(post_save, sender=User) def create_blank_statistics(sender, instance=None, created=False, **kwargs): if created: Statistic.objects.create(user=instance) class Language(models.Model):", "language = models.ForeignKey(Language, on_delete=models.DO_NOTHING, related_name='cards') times_shown = models.IntegerField(default=0) answered_correctly = models.IntegerField(default=0) @property def", "on_delete=models.CASCADE, related_name='following') following = models.ForeignKey(User, on_delete=models.CASCADE, related_name='followed_by') class Statistic(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE,", "self.times_shown) if 0 <= ratio < 0.25: return \"INSANE\" elif 0.25 <= ratio", "return \"HARD\" elif 0.5 <= ratio < 0.75: return \"MEDIUM\" elif 0.75 <=", "has_achievement = self in user.achievements.all() if has_achievement: return False condition_result = eval(str(self.condition)) if", "round(Fraction(self.answered_correctly, self.times_shown), 2) else: return 0 def __str__(self): return str(self.pk) + ' |", "return str(self.name) def try_award_to(self, user): has_achievement = self in user.achievements.all() if has_achievement: return", "' | ' + str(self.language.language_code) @receiver(post_save, sender=Statistic) @receiver(post_save, sender=UserFollowing) @receiver(post_save, 
sender=User) def trigger_achievements_after_statistics_save(sender,", "class Statistic(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='statistics') correctly_swiped_taboo_cards = models.IntegerField(default=0) swiped_taboo_cards = models.IntegerField(default=0)", "related_name='cards') times_shown = models.IntegerField(default=0) answered_correctly = models.IntegerField(default=0) @property def difficulty(self): if self.times_shown is", "has_achievement: return False condition_result = eval(str(self.condition)) if condition_result: user.achievements.add(self) return True else: return", "users = models.ManyToManyField(User, related_name='selected_languages', blank=True) language_code = models.CharField(max_length=32, null=True, blank=True) def __str__(self): return", "eval(str(self.condition)) if condition_result: user.achievements.add(self) return True else: return False class UserFollowing(models.Model): user =", "import Fraction from django.db import models from django.contrib.auth.models import User from django.dispatch import", "user.achievements.add(self) return True else: return False class UserFollowing(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='following')", "= models.CharField(max_length=2048) owner = models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name='cards') language = models.ForeignKey(Language, on_delete=models.DO_NOTHING, related_name='cards') times_shown", "+ ' | ' + str(self.language.language_code) @receiver(post_save, sender=Statistic) @receiver(post_save, sender=UserFollowing) @receiver(post_save, sender=User) def", "return False class Achievement(models.Model): LEVEL_CHOICES = ( (\"1\", \"Bronze\"), (\"2\", \"Silver\"), (\"3\", \"Gold\"),", "user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='following') following = models.ForeignKey(User, on_delete=models.CASCADE, related_name='followed_by') class Statistic(models.Model): user", 
"on_delete=models.CASCADE, related_name='statistics') correctly_swiped_taboo_cards = models.IntegerField(default=0) swiped_taboo_cards = models.IntegerField(default=0) correctly_ans_flashcards = models.IntegerField(default=0) ans_flashcards =", "not 0: return round(Fraction(self.correctly_swiped_taboo_cards, self.swiped_taboo_cards), 2) else: return 0 class TabooCard(models.Model): key_word =", "if has_achievement: return False condition_result = eval(str(self.condition)) if condition_result: user.achievements.add(self) return True else:", "\"EASY\" @property def card_efficiency(self): if self.times_shown is not 0: return round(Fraction(self.answered_correctly, self.times_shown), 2)", "models.ForeignKey(User, on_delete=models.CASCADE, related_name='followed_by') class Statistic(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='statistics') correctly_swiped_taboo_cards = models.IntegerField(default=0)", "level = models.CharField(max_length=1, choices=LEVEL_CHOICES) score = models.IntegerField() def __str__(self): return str(self.name) def try_award_to(self,", "return True else: return False class UserFollowing(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='following') following", "models.IntegerField(default=0) swiped_taboo_cards = models.IntegerField(default=0) correctly_ans_flashcards = models.IntegerField(default=0) ans_flashcards = models.IntegerField(default=0) translated_words = models.IntegerField(default=0)", "UserFollowing(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='following') following = models.ForeignKey(User, on_delete=models.CASCADE, related_name='followed_by') class Statistic(models.Model):", "round(Fraction(self.correctly_swiped_taboo_cards, self.swiped_taboo_cards), 2) else: return 0 class TabooCard(models.Model): key_word = models.CharField(max_length=128) black_list =", "return 0 class TabooCard(models.Model): key_word = 
models.CharField(max_length=128) black_list = models.CharField(max_length=2048) owner = models.ForeignKey(User,", "\"INSANE\" elif 0.25 <= ratio < 0.5: return \"HARD\" elif 0.5 <= ratio", "elif 0.75 <= ratio: return \"EASY\" @property def card_efficiency(self): if self.times_shown is not", "0.25 <= ratio < 0.5: return \"HARD\" elif 0.5 <= ratio < 0.75:", "\"HARD\" elif 0.5 <= ratio < 0.75: return \"MEDIUM\" elif 0.75 <= ratio:", "def card_efficiency(self): if self.times_shown is not 0: return round(Fraction(self.answered_correctly, self.times_shown), 2) else: return", "True else: return False class UserFollowing(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='following') following =", "(\"1\", \"Bronze\"), (\"2\", \"Silver\"), (\"3\", \"Gold\"), (\"4\", \"Diamond\"), ) condition = models.TextField(max_length=2048) name", "ratio = Fraction(self.answered_correctly, self.times_shown) if 0 <= ratio < 0.25: return \"INSANE\" elif", "= models.ForeignKey(Language, on_delete=models.DO_NOTHING, related_name='cards') times_shown = models.IntegerField(default=0) answered_correctly = models.IntegerField(default=0) @property def difficulty(self):", "str(self.key_word) + ' | ' + str(self.language.language_code) @receiver(post_save, sender=Statistic) @receiver(post_save, sender=UserFollowing) @receiver(post_save, sender=User)", "Language(models.Model): name = models.CharField(max_length=32) users = models.ManyToManyField(User, related_name='selected_languages', blank=True) language_code = models.CharField(max_length=32, null=True,", "= models.OneToOneField(User, on_delete=models.CASCADE, related_name='statistics') correctly_swiped_taboo_cards = models.IntegerField(default=0) swiped_taboo_cards = models.IntegerField(default=0) correctly_ans_flashcards = models.IntegerField(default=0)", "translated_words = models.IntegerField(default=0) @property def taboo_efficiency(self): if self.swiped_taboo_cards is not 0: return 
round(Fraction(self.correctly_swiped_taboo_cards,", "< 0.75: return \"MEDIUM\" elif 0.75 <= ratio: return \"EASY\" @property def card_efficiency(self):", "def __str__(self): return str(self.pk) + ' | ' + str(self.key_word) + ' |", "blank=True) def __str__(self): return self.name def __eq__(self, other): if isinstance(other, Language): return self.name", "str(self.pk) + ' | ' + str(self.key_word) + ' | ' + str(self.language.language_code)", "ratio < 0.25: return \"INSANE\" elif 0.25 <= ratio < 0.5: return \"HARD\"", "__eq__(self, other): if isinstance(other, Language): return self.name == other.name else: return False class", "0.75 <= ratio: return \"EASY\" @property def card_efficiency(self): if self.times_shown is not 0:", "def __str__(self): return self.name def __eq__(self, other): if isinstance(other, Language): return self.name ==", "0 def __str__(self): return str(self.pk) + ' | ' + str(self.key_word) + '", "if self.swiped_taboo_cards is not 0: return round(Fraction(self.correctly_swiped_taboo_cards, self.swiped_taboo_cards), 2) else: return 0 class", "Achievement(models.Model): LEVEL_CHOICES = ( (\"1\", \"Bronze\"), (\"2\", \"Silver\"), (\"3\", \"Gold\"), (\"4\", \"Diamond\"), )", "user.achievements.all() if has_achievement: return False condition_result = eval(str(self.condition)) if condition_result: user.achievements.add(self) return True", "isinstance(instance, User): if not created: grant_achievements(instance) else: grant_achievements(instance.user) def grant_achievements(user): for achievement in", "def try_award_to(self, user): has_achievement = self in user.achievements.all() if has_achievement: return False condition_result", "from django.db import models from django.contrib.auth.models import User from django.dispatch import receiver from", "class Achievement(models.Model): LEVEL_CHOICES = ( (\"1\", \"Bronze\"), (\"2\", \"Silver\"), (\"3\", \"Gold\"), (\"4\", \"Diamond\"),", "models.ManyToManyField(User, related_name=\"achievements\", blank=True) 
level = models.CharField(max_length=1, choices=LEVEL_CHOICES) score = models.IntegerField() def __str__(self): return", "= models.IntegerField(default=0) correctly_ans_flashcards = models.IntegerField(default=0) ans_flashcards = models.IntegerField(default=0) translated_words = models.IntegerField(default=0) @property def", "def taboo_efficiency(self): if self.swiped_taboo_cards is not 0: return round(Fraction(self.correctly_swiped_taboo_cards, self.swiped_taboo_cards), 2) else: return", "' + str(self.language.language_code) @receiver(post_save, sender=Statistic) @receiver(post_save, sender=UserFollowing) @receiver(post_save, sender=User) def trigger_achievements_after_statistics_save(sender, instance=None, created=False,", "0 <= ratio < 0.25: return \"INSANE\" elif 0.25 <= ratio < 0.5:", "django.db.models.signals import post_save @receiver(post_save, sender=User) def create_blank_statistics(sender, instance=None, created=False, **kwargs): if created: Statistic.objects.create(user=instance)", "other): if isinstance(other, Language): return self.name == other.name else: return False class Achievement(models.Model):", "0: return round(Fraction(self.answered_correctly, self.times_shown), 2) else: return 0 def __str__(self): return str(self.pk) +", "= models.CharField(max_length=128) black_list = models.CharField(max_length=2048) owner = models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name='cards') language = models.ForeignKey(Language,", "trigger_achievements_after_statistics_save(sender, instance=None, created=False, **kwargs): if isinstance(instance, User): if not created: grant_achievements(instance) else: grant_achievements(instance.user)", "ans_flashcards = models.IntegerField(default=0) translated_words = models.IntegerField(default=0) @property def taboo_efficiency(self): if self.swiped_taboo_cards is not", "models.IntegerField(default=0) @property def difficulty(self): if self.times_shown is 0: return \"NOT ENOUGH STATS\" ratio", "is not 0: return 
round(Fraction(self.answered_correctly, self.times_shown), 2) else: return 0 def __str__(self): return", "score = models.IntegerField() def __str__(self): return str(self.name) def try_award_to(self, user): has_achievement = self", "related_name='following') following = models.ForeignKey(User, on_delete=models.CASCADE, related_name='followed_by') class Statistic(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='statistics')", "False class UserFollowing(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='following') following = models.ForeignKey(User, on_delete=models.CASCADE, related_name='followed_by')", "sender=Statistic) @receiver(post_save, sender=UserFollowing) @receiver(post_save, sender=User) def trigger_achievements_after_statistics_save(sender, instance=None, created=False, **kwargs): if isinstance(instance, User):", "+ ' | ' + str(self.key_word) + ' | ' + str(self.language.language_code) @receiver(post_save,", "TabooCard(models.Model): key_word = models.CharField(max_length=128) black_list = models.CharField(max_length=2048) owner = models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name='cards') language", "def __str__(self): return str(self.name) def try_award_to(self, user): has_achievement = self in user.achievements.all() if", "user): has_achievement = self in user.achievements.all() if has_achievement: return False condition_result = eval(str(self.condition))", "models.IntegerField(default=0) correctly_ans_flashcards = models.IntegerField(default=0) ans_flashcards = models.IntegerField(default=0) translated_words = models.IntegerField(default=0) @property def taboo_efficiency(self):", "related_name='selected_languages', blank=True) language_code = models.CharField(max_length=32, null=True, blank=True) def __str__(self): return self.name def __eq__(self,", "self.name def __eq__(self, other): if isinstance(other, Language): return self.name == other.name else: return", 
"choices=LEVEL_CHOICES) score = models.IntegerField() def __str__(self): return str(self.name) def try_award_to(self, user): has_achievement =", "= eval(str(self.condition)) if condition_result: user.achievements.add(self) return True else: return False class UserFollowing(models.Model): user", "ratio < 0.5: return \"HARD\" elif 0.5 <= ratio < 0.75: return \"MEDIUM\"", "return round(Fraction(self.correctly_swiped_taboo_cards, self.swiped_taboo_cards), 2) else: return 0 class TabooCard(models.Model): key_word = models.CharField(max_length=128) black_list", "condition = models.TextField(max_length=2048) name = models.CharField(max_length=128) font_awesome_icon = models.TextField(max_length=2048) users = models.ManyToManyField(User, related_name=\"achievements\",", "django.db import models from django.contrib.auth.models import User from django.dispatch import receiver from django.db.models.signals", "self.swiped_taboo_cards is not 0: return round(Fraction(self.correctly_swiped_taboo_cards, self.swiped_taboo_cards), 2) else: return 0 class TabooCard(models.Model):", "< 0.5: return \"HARD\" elif 0.5 <= ratio < 0.75: return \"MEDIUM\" elif", "__str__(self): return str(self.name) def try_award_to(self, user): has_achievement = self in user.achievements.all() if has_achievement:", "in user.achievements.all() if has_achievement: return False condition_result = eval(str(self.condition)) if condition_result: user.achievements.add(self) return", "django.contrib.auth.models import User from django.dispatch import receiver from django.db.models.signals import post_save @receiver(post_save, sender=User)", "= models.CharField(max_length=128) font_awesome_icon = models.TextField(max_length=2048) users = models.ManyToManyField(User, related_name=\"achievements\", blank=True) level = models.CharField(max_length=1,", "@receiver(post_save, sender=UserFollowing) @receiver(post_save, sender=User) def trigger_achievements_after_statistics_save(sender, instance=None, created=False, 
**kwargs): if isinstance(instance, User): if", "models from django.contrib.auth.models import User from django.dispatch import receiver from django.db.models.signals import post_save", "(\"3\", \"Gold\"), (\"4\", \"Diamond\"), ) condition = models.TextField(max_length=2048) name = models.CharField(max_length=128) font_awesome_icon =", "name = models.CharField(max_length=128) font_awesome_icon = models.TextField(max_length=2048) users = models.ManyToManyField(User, related_name=\"achievements\", blank=True) level =", "other.name else: return False class Achievement(models.Model): LEVEL_CHOICES = ( (\"1\", \"Bronze\"), (\"2\", \"Silver\"),", "else: return 0 class TabooCard(models.Model): key_word = models.CharField(max_length=128) black_list = models.CharField(max_length=2048) owner =", "' | ' + str(self.key_word) + ' | ' + str(self.language.language_code) @receiver(post_save, sender=Statistic)", "\"NOT ENOUGH STATS\" ratio = Fraction(self.answered_correctly, self.times_shown) if 0 <= ratio < 0.25:", "import receiver from django.db.models.signals import post_save @receiver(post_save, sender=User) def create_blank_statistics(sender, instance=None, created=False, **kwargs):", "self.times_shown is 0: return \"NOT ENOUGH STATS\" ratio = Fraction(self.answered_correctly, self.times_shown) if 0", "return \"EASY\" @property def card_efficiency(self): if self.times_shown is not 0: return round(Fraction(self.answered_correctly, self.times_shown),", "if isinstance(instance, User): if not created: grant_achievements(instance) else: grant_achievements(instance.user) def grant_achievements(user): for achievement", "return \"INSANE\" elif 0.25 <= ratio < 0.5: return \"HARD\" elif 0.5 <=", "import models from django.contrib.auth.models import User from django.dispatch import receiver from django.db.models.signals import", "on_delete=models.DO_NOTHING, related_name='cards') times_shown = models.IntegerField(default=0) answered_correctly = models.IntegerField(default=0) @property def 
difficulty(self): if self.times_shown", "return False condition_result = eval(str(self.condition)) if condition_result: user.achievements.add(self) return True else: return False", "+ str(self.language.language_code) @receiver(post_save, sender=Statistic) @receiver(post_save, sender=UserFollowing) @receiver(post_save, sender=User) def trigger_achievements_after_statistics_save(sender, instance=None, created=False, **kwargs):", "(\"2\", \"Silver\"), (\"3\", \"Gold\"), (\"4\", \"Diamond\"), ) condition = models.TextField(max_length=2048) name = models.CharField(max_length=128)", "models.TextField(max_length=2048) users = models.ManyToManyField(User, related_name=\"achievements\", blank=True) level = models.CharField(max_length=1, choices=LEVEL_CHOICES) score = models.IntegerField()", "import User from django.dispatch import receiver from django.db.models.signals import post_save @receiver(post_save, sender=User) def", "card_efficiency(self): if self.times_shown is not 0: return round(Fraction(self.answered_correctly, self.times_shown), 2) else: return 0", "self.times_shown), 2) else: return 0 def __str__(self): return str(self.pk) + ' | '", "font_awesome_icon = models.TextField(max_length=2048) users = models.ManyToManyField(User, related_name=\"achievements\", blank=True) level = models.CharField(max_length=1, choices=LEVEL_CHOICES) score", "+ str(self.key_word) + ' | ' + str(self.language.language_code) @receiver(post_save, sender=Statistic) @receiver(post_save, sender=UserFollowing) @receiver(post_save,", "models.CharField(max_length=32) users = models.ManyToManyField(User, related_name='selected_languages', blank=True) language_code = models.CharField(max_length=32, null=True, blank=True) def __str__(self):", "LEVEL_CHOICES = ( (\"1\", \"Bronze\"), (\"2\", \"Silver\"), (\"3\", \"Gold\"), (\"4\", \"Diamond\"), ) condition", "receiver from django.db.models.signals import post_save @receiver(post_save, sender=User) def create_blank_statistics(sender, instance=None, 
created=False, **kwargs): if", "= models.ForeignKey(User, on_delete=models.CASCADE, related_name='following') following = models.ForeignKey(User, on_delete=models.CASCADE, related_name='followed_by') class Statistic(models.Model): user =", "django.dispatch import receiver from django.db.models.signals import post_save @receiver(post_save, sender=User) def create_blank_statistics(sender, instance=None, created=False,", "= models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name='cards') language = models.ForeignKey(Language, on_delete=models.DO_NOTHING, related_name='cards') times_shown = models.IntegerField(default=0) answered_correctly", "answered_correctly = models.IntegerField(default=0) @property def difficulty(self): if self.times_shown is 0: return \"NOT ENOUGH", "related_name='followed_by') class Statistic(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='statistics') correctly_swiped_taboo_cards = models.IntegerField(default=0) swiped_taboo_cards =", "related_name='statistics') correctly_swiped_taboo_cards = models.IntegerField(default=0) swiped_taboo_cards = models.IntegerField(default=0) correctly_ans_flashcards = models.IntegerField(default=0) ans_flashcards = models.IntegerField(default=0)", "create_blank_statistics(sender, instance=None, created=False, **kwargs): if created: Statistic.objects.create(user=instance) class Language(models.Model): name = models.CharField(max_length=32) users", "if isinstance(other, Language): return self.name == other.name else: return False class Achievement(models.Model): LEVEL_CHOICES", "is not 0: return round(Fraction(self.correctly_swiped_taboo_cards, self.swiped_taboo_cards), 2) else: return 0 class TabooCard(models.Model): key_word", "fractions import Fraction from django.db import models from django.contrib.auth.models import User from django.dispatch", "= models.CharField(max_length=32, null=True, blank=True) def __str__(self): return self.name def __eq__(self, 
other): if isinstance(other,", "\"Gold\"), (\"4\", \"Diamond\"), ) condition = models.TextField(max_length=2048) name = models.CharField(max_length=128) font_awesome_icon = models.TextField(max_length=2048)", "self.swiped_taboo_cards), 2) else: return 0 class TabooCard(models.Model): key_word = models.CharField(max_length=128) black_list = models.CharField(max_length=2048)", "ENOUGH STATS\" ratio = Fraction(self.answered_correctly, self.times_shown) if 0 <= ratio < 0.25: return", "= models.IntegerField(default=0) @property def difficulty(self): if self.times_shown is 0: return \"NOT ENOUGH STATS\"", "return False class UserFollowing(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='following') following = models.ForeignKey(User, on_delete=models.CASCADE,", "0.5 <= ratio < 0.75: return \"MEDIUM\" elif 0.75 <= ratio: return \"EASY\"", "class UserFollowing(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='following') following = models.ForeignKey(User, on_delete=models.CASCADE, related_name='followed_by') class", "| ' + str(self.key_word) + ' | ' + str(self.language.language_code) @receiver(post_save, sender=Statistic) @receiver(post_save,", "try_award_to(self, user): has_achievement = self in user.achievements.all() if has_achievement: return False condition_result =", "from django.dispatch import receiver from django.db.models.signals import post_save @receiver(post_save, sender=User) def create_blank_statistics(sender, instance=None,", "__str__(self): return self.name def __eq__(self, other): if isinstance(other, Language): return self.name == other.name", "users = models.ManyToManyField(User, related_name=\"achievements\", blank=True) level = models.CharField(max_length=1, choices=LEVEL_CHOICES) score = models.IntegerField() def", "language_code = models.CharField(max_length=32, null=True, blank=True) def __str__(self): return self.name def __eq__(self, other): if", "@property def 
card_efficiency(self): if self.times_shown is not 0: return round(Fraction(self.answered_correctly, self.times_shown), 2) else:", "models.CharField(max_length=128) black_list = models.CharField(max_length=2048) owner = models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name='cards') language = models.ForeignKey(Language, on_delete=models.DO_NOTHING,", "difficulty(self): if self.times_shown is 0: return \"NOT ENOUGH STATS\" ratio = Fraction(self.answered_correctly, self.times_shown)", "def difficulty(self): if self.times_shown is 0: return \"NOT ENOUGH STATS\" ratio = Fraction(self.answered_correctly,", "2) else: return 0 class TabooCard(models.Model): key_word = models.CharField(max_length=128) black_list = models.CharField(max_length=2048) owner", "key_word = models.CharField(max_length=128) black_list = models.CharField(max_length=2048) owner = models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name='cards') language =", "black_list = models.CharField(max_length=2048) owner = models.ForeignKey(User, on_delete=models.DO_NOTHING, related_name='cards') language = models.ForeignKey(Language, on_delete=models.DO_NOTHING, related_name='cards')", "Language): return self.name == other.name else: return False class Achievement(models.Model): LEVEL_CHOICES = (", "def trigger_achievements_after_statistics_save(sender, instance=None, created=False, **kwargs): if isinstance(instance, User): if not created: grant_achievements(instance) else:", "if self.times_shown is 0: return \"NOT ENOUGH STATS\" ratio = Fraction(self.answered_correctly, self.times_shown) if", "self.name == other.name else: return False class Achievement(models.Model): LEVEL_CHOICES = ( (\"1\", \"Bronze\"),", "on_delete=models.DO_NOTHING, related_name='cards') language = models.ForeignKey(Language, on_delete=models.DO_NOTHING, related_name='cards') times_shown = models.IntegerField(default=0) answered_correctly = models.IntegerField(default=0)", "= models.TextField(max_length=2048) name = 
models.CharField(max_length=128) font_awesome_icon = models.TextField(max_length=2048) users = models.ManyToManyField(User, related_name=\"achievements\", blank=True)", "models.IntegerField(default=0) @property def taboo_efficiency(self): if self.swiped_taboo_cards is not 0: return round(Fraction(self.correctly_swiped_taboo_cards, self.swiped_taboo_cards), 2)", "correctly_ans_flashcards = models.IntegerField(default=0) ans_flashcards = models.IntegerField(default=0) translated_words = models.IntegerField(default=0) @property def taboo_efficiency(self): if", "condition_result = eval(str(self.condition)) if condition_result: user.achievements.add(self) return True else: return False class UserFollowing(models.Model):", "isinstance(other, Language): return self.name == other.name else: return False class Achievement(models.Model): LEVEL_CHOICES =", "models.CharField(max_length=128) font_awesome_icon = models.TextField(max_length=2048) users = models.ManyToManyField(User, related_name=\"achievements\", blank=True) level = models.CharField(max_length=1, choices=LEVEL_CHOICES)", "models.IntegerField(default=0) translated_words = models.IntegerField(default=0) @property def taboo_efficiency(self): if self.swiped_taboo_cards is not 0: return", "else: return False class UserFollowing(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='following') following = models.ForeignKey(User,", "from django.contrib.auth.models import User from django.dispatch import receiver from django.db.models.signals import post_save @receiver(post_save,", "\"MEDIUM\" elif 0.75 <= ratio: return \"EASY\" @property def card_efficiency(self): if self.times_shown is", "on_delete=models.CASCADE, related_name='followed_by') class Statistic(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='statistics') correctly_swiped_taboo_cards = models.IntegerField(default=0) swiped_taboo_cards", "@property def taboo_efficiency(self): if 
self.swiped_taboo_cards is not 0: return round(Fraction(self.correctly_swiped_taboo_cards, self.swiped_taboo_cards), 2) else:", "0.75: return \"MEDIUM\" elif 0.75 <= ratio: return \"EASY\" @property def card_efficiency(self): if", "models.ManyToManyField(User, related_name='selected_languages', blank=True) language_code = models.CharField(max_length=32, null=True, blank=True) def __str__(self): return self.name def", "models.IntegerField() def __str__(self): return str(self.name) def try_award_to(self, user): has_achievement = self in user.achievements.all()", "condition_result: user.achievements.add(self) return True else: return False class UserFollowing(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE,", "= models.IntegerField(default=0) swiped_taboo_cards = models.IntegerField(default=0) correctly_ans_flashcards = models.IntegerField(default=0) ans_flashcards = models.IntegerField(default=0) translated_words =", "<= ratio < 0.75: return \"MEDIUM\" elif 0.75 <= ratio: return \"EASY\" @property", "return \"MEDIUM\" elif 0.75 <= ratio: return \"EASY\" @property def card_efficiency(self): if self.times_shown", "Fraction(self.answered_correctly, self.times_shown) if 0 <= ratio < 0.25: return \"INSANE\" elif 0.25 <=", "\"Silver\"), (\"3\", \"Gold\"), (\"4\", \"Diamond\"), ) condition = models.TextField(max_length=2048) name = models.CharField(max_length=128) font_awesome_icon", "instance=None, created=False, **kwargs): if isinstance(instance, User): if not created: grant_achievements(instance) else: grant_achievements(instance.user) def", "<= ratio < 0.5: return \"HARD\" elif 0.5 <= ratio < 0.75: return", "return \"NOT ENOUGH STATS\" ratio = Fraction(self.answered_correctly, self.times_shown) if 0 <= ratio <", "\"Bronze\"), (\"2\", \"Silver\"), (\"3\", \"Gold\"), (\"4\", \"Diamond\"), ) condition = models.TextField(max_length=2048) name =", "== other.name else: return False class Achievement(models.Model): LEVEL_CHOICES = ( (\"1\", \"Bronze\"), 
(\"2\"," ]
[ "\"\"\"Replace the Return statement with a Store statement \"\"\" return _make.Store(self.buffer_var, _make.Cast(self.dtype, node.value),", "def addition(x, y): hcl.return_(x+y) A = hcl.compute((10, 10), addition) # example 2 -", "prepare the iteration variables args = [] # list of arguments' names nargs", "if tensor is None else False with Stage(name, dtype, shape) as stage: if", "must be a tuple\") # properties for the returned tensor shape = CastRemover().mutate(shape)", "y): with hcl.if_(x > y): hcl.return_(x) with hcl.else_: hcl.return_(y) A = hcl.compute((10, 10),", "hcl.compute((10, 10), lambda x, y: x+y) # equivalent code for x in range(0,", "lambda function if exists fcompute : callable The computation rule shape : tuple,", "construction rule of the returned tensor, which must be callable. The number of", "if nargs < len(shape): for i in range(nargs, len(shape)): args.append(\"args\" + str(i)) elif", "_, _ = get_index(shape, indices, 0) st = _make.Store(buffer_var, _make.Cast(dtype, ret[tuple(ret_ivs)]), index) stage.emit(make_for(ret_ivs,", "name : str, optional The name of the returned tensor dtype : Type,", "\"\"\"Pre-process the fcompute field of an API. \"\"\" # check API correctness if", "stage._dtype, name, stage._buf) buffer_var = tensor._buf.data dtype = tensor.dtype shape = tensor.shape stage.stmt_stack.append([])", "other HeteroCL APIs, even imperative DSL. Parameters ---------- shape : tuple The shape", "data type of the placeholder Returns ------- Tensor Examples -------- .. 
code-block:: python", "0) else: raise APIError(\"Unknown return type of the computation rule\") # add attributes", "number of dimensions\") return args, len(shape) def compute_body(name, lambda_ivs, fcompute, shape=(), dtype=None, tensor=None,", "name=None, dtype=None, attrs=OrderedDict()): \"\"\"Construct a new tensor based on the shape and the", "10)) tA = compute_tanh(A) tB = compute_tanh(B) # example 3 - mixed-paradigm programming", "buffer_var self.dtype = dtype self.index = index def mutate_KerenlDef(self, node): \"\"\"Omit the KernelDef", "indices.append(iv) non_reduce_ivs.append(iv) if rid != len(ret.shape): raise APIError(\"Incorrect number of reduction axes in", "shape of the returned tensor fcompute : callable The construction rule for the", "from tvm import expr_hcl as _expr, stmt as _stmt from tvm.tir import IterVar", "= [] indices = [] rid = 0 for iv in lambda_ivs: if", "DSL. Parameters ---------- shape : tuple The shape of the returned tensor fcompute", "[_IterVar((0, shape[n]), args[n], 0) for n in range(0, nargs)] # call the helper", "Store statement index : Expr The index of the Store statement \"\"\" def", "one if it is `None` Returns ------- Tensor or None \"\"\" var_list =", "shape = CastRemover().mutate(shape) name = get_name(\"compute\", name) # prepare the iteration variables args,", "Attributes ---------- buffer_var : Var The buffer variable of the Store statement dtype", "_make.Store(self.buffer_var, _make.Cast(self.dtype, node.value), self.index) def process_fcompute(fcompute, shape): \"\"\"Pre-process the fcompute field of an", "each element of the returned tensor. It can contain other HeteroCL APIs, even", "else: raise APIError(\"Unknown return type of the computation rule\") # add attributes to", "attrs=OrderedDict()): \"\"\"Create a stage and perform the computation. 
If `tensor` is `None`, no", "if isinstance(stmt, _stmt.For): stmt = _make.For(stmt.loop_var, stmt.min, stmt.extent, 0, 0, stmt.body, list(attrs.keys()), list(attrs.values()))", "# prepare the iteration variables args, nargs = process_fcompute(fcompute, shape) lambda_ivs = [_IterVar((0,", "compute_tanh(A) tB = compute_tanh(B) # example 3 - mixed-paradigm programming def return_max(x, y):", "helper function that returns a new tensor tensor = compute_body(name, lambda_ivs, fcompute, shape,", "def return_max(x, y): with hcl.if_(x > y): hcl.return_(x) with hcl.else_: hcl.return_(y) A =", "var_list = [i.var for i in lambda_ivs] return_tensor = True if tensor is", "t in stage.lhs_tensors: t.last_update = stage stmt = None if ret is None:", "the returned tensor dtype : Type, optional The data type of the placeholder", "3 - mixed-paradigm programming def return_max(x, y): with hcl.if_(x > y): hcl.return_(x) with", "non_reduce_ivs.append(iv) if rid != len(ret.shape): raise APIError(\"Incorrect number of reduction axes in lambda", "util import get_index, get_name, make_for, CastRemover from tensor import Scalar, Tensor, TensorSlice from", "mixed-paradigm programming def return_max(x, y): with hcl.if_(x > y): hcl.return_(x) with hcl.else_: hcl.return_(y)", "len(ret.shape): raise APIError(\"Incorrect number of reduction axes in lambda arguments\") index, _, _", "Return statement with a Store statement. Attributes ---------- buffer_var : Var The buffer", "of the returned tensor. The second field `fcompute` defines the construction rule of", "definition @hcl.def_([(), ()]) def addition(x, y): hcl.return_(x+y) A = hcl.compute((10, 10), addition) #", "lambda_ivs : list of IterVar A list contains the iteration variables in the", "= hcl.placeholder((10, 10)) B = hcl.placeholder((10, 10, 10)) tA = compute_tanh(A) tB =", "self.index) def process_fcompute(fcompute, shape): \"\"\"Pre-process the fcompute field of an API. 
\"\"\" #", "tensor name : str, optional The name of the returned tensor dtype :", "Var The buffer variable of the Store statement dtype : Type The data", "placeholder Returns ------- Tensor Examples -------- .. code-block:: python # example 1.1 -", "name of the returned tensor dtype : Type, optional The data type of", "index, _, _ = get_index(shape, indices, 0) stage.emit(_make.Store(buffer_var, _make.Cast(dtype, ret), index)) stmt =", "0 for iv in lambda_ivs: if iv.var.name[0] == \"_\": indices.append(ret_ivs[rid]) rid += 1", "tensor shape = CastRemover().mutate(shape) name = get_name(\"compute\", name) # prepare the iteration variables", "lambda *args: hcl.tanh(X[args])) A = hcl.placeholder((10, 10)) B = hcl.placeholder((10, 10, 10)) tA", "> len(shape): raise APIError(\"The number of arguments exceeds the number of dimensions\") return", "for the returned tensor name : str, optional The name of the returned", "!= len(ret.shape): raise APIError(\"Incorrect number of reduction axes in lambda arguments\") index, _,", "Return statement with a Store statement \"\"\" return _make.Store(self.buffer_var, _make.Cast(self.dtype, node.value), self.index) def", "with Store stmt indices = lambda_ivs index, _, _ = get_index(shape, indices, 0)", "dimension of the returned tensor. The second field `fcompute` defines the construction rule", "fcompute.arg_names nargs = len(args) else: args = list(fcompute.__code__.co_varnames) nargs = fcompute.__code__.co_argcount # automatically", "stage.pop_stmt() stage.input_stages.remove(stage) if non_reduce_ivs: stmt = make_for(non_reduce_ivs, stmt, 0) else: raise APIError(\"Unknown return", "The API **returns a new tensor**. The shape must be a tuple. 
The", "False with Stage(name, dtype, shape) as stage: if not return_tensor: stage.input_stages.add(tensor.last_update) else: tensor", "return_tensor = True if tensor is None else False with Stage(name, dtype, shape)", "example 2 - undetermined arguments def compute_tanh(X): return hcl.compute(X.shape, lambda *args: hcl.tanh(X[args])) A", "HeteroCL\"\"\" #pylint: disable=no-member, redefined-builtin, too-many-arguments, missing-docstring import numbers from collections import OrderedDict from", "else: tensor = Tensor(shape, stage._dtype, name, stage._buf) buffer_var = tensor._buf.data dtype = tensor.dtype", "<filename>heterocl/compute_api.py \"\"\"Compute APIs in HeteroCL\"\"\" #pylint: disable=no-member, redefined-builtin, too-many-arguments, missing-docstring import numbers from", "dimensions\") return args, len(shape) def compute_body(name, lambda_ivs, fcompute, shape=(), dtype=None, tensor=None, attrs=OrderedDict()): \"\"\"Create", ": Tensor, optional The tensor to be updated. Create a new one if", "a new one if it is `None` Returns ------- Tensor or None \"\"\"", "else False with Stage(name, dtype, shape) as stage: if not return_tensor: stage.input_stages.add(tensor.last_update) else:", "None else False with Stage(name, dtype, shape) as stage: if not return_tensor: stage.input_stages.add(tensor.last_update)", "HeteroCL APIs, even imperative DSL. Parameters ---------- shape : tuple The shape of", "python # example 1.1 - anonymous lambda function A = hcl.compute((10, 10), lambda", "lambda function A = hcl.compute((10, 10), lambda x, y: x+y) # equivalent code", "updated. Create a new one if it is `None` Returns ------- Tensor or", "import Module ############################################################################## # Helper classes and functions ############################################################################## class ReplaceReturn(CastRemover): \"\"\"Replace all", "is `None`, no tensor is returned. 
Parameters ---------- name : str The name", "must be callable. The number of arguments should match the dimension defined by", "= compute_tanh(B) # example 3 - mixed-paradigm programming def return_max(x, y): with hcl.if_(x", "Tensor Examples -------- .. code-block:: python # example 1.1 - anonymous lambda function", "ret), index)) stmt = make_for(indices, stage.pop_stmt(), 0) elif isinstance(ret, Tensor): # reduction ret_ivs", "dtype : Type, optional The data type of the output/updated tensor tensor :", "# automatically create argument names if nargs < len(shape): for i in range(nargs,", "perform the computation. If `tensor` is `None`, no tensor is returned. Parameters ----------", "Tensor, optional The tensor to be updated. Create a new one if it", "tensor fcompute : callable The construction rule for the returned tensor name :", "addition(x, y): hcl.return_(x+y) A = hcl.compute((10, 10), addition) # example 2 - undetermined", "lambda x, y: x+y) # equivalent code for x in range(0, 10): for", "indices.append(ret_ivs[rid]) rid += 1 else: indices.append(iv) non_reduce_ivs.append(iv) if rid != len(ret.shape): raise APIError(\"Incorrect", "check*. This, however, provides users more programming flexibility. The compute function specifies how", "Parameters ---------- shape : tuple The shape of the returned tensor fcompute :", "OrderedDict from tvm import expr_hcl as _expr, stmt as _stmt from tvm.tir import", "returned tensor, which must be callable. The number of arguments should match the", "function def addition(x, y): return x+y A = hcl.compute((10, 10), addition) # example", "the computation. If `tensor` is `None`, no tensor is returned. Parameters ---------- name", "import get_index, get_name, make_for, CastRemover from tensor import Scalar, Tensor, TensorSlice from schedule", "tensor, which must be callable. 
The number of arguments should match the dimension", "is None: # replace all hcl.return_ with Store stmt indices = lambda_ivs index,", "function specifies how we calculate each element of the returned tensor. It can", "stage._buf) buffer_var = tensor._buf.data dtype = tensor.dtype shape = tensor.shape stage.stmt_stack.append([]) ret =", "function that returns a new tensor tensor = compute_body(name, lambda_ivs, fcompute, shape, dtype,", "stage.axis_list = indices + stage.axis_list if return_tensor: tensor._tensor = stage._op return tensor return", ": list of IterVar A list contains the iteration variables in the lambda", "return_tensor: tensor._tensor = stage._op return tensor return None ############################################################################## # APIs exposed to", "shape of compute API must be a tuple\") # properties for the returned", "str(i)) elif nargs > len(shape): raise APIError(\"The number of arguments exceeds the number", "to be updated. Create a new one if it is `None` Returns -------", "for i in range(nargs, len(shape)): args.append(\"args\" + str(i)) elif nargs > len(shape): raise", ": str, optional The name of the returned tensor dtype : Type, optional", "an API. 
\"\"\" # check API correctness if not callable(fcompute): raise APIError(\"The construction", "0 # number of arguments if isinstance(fcompute, Module): args = fcompute.arg_names nargs =", "# check API correctness if not isinstance(shape, tuple): raise APIError(\"The shape of compute", "tensor = Tensor(shape, stage._dtype, name, stage._buf) buffer_var = tensor._buf.data dtype = tensor.dtype shape", "self.buffer_var = buffer_var self.dtype = dtype self.index = index def mutate_KerenlDef(self, node): \"\"\"Omit", "isinstance(ret, Tensor): # reduction ret_ivs = [_IterVar((0, ret.shape[i]), ret.name+\"_i\" + str(i), 0) for", "if not callable(fcompute): raise APIError(\"The construction rule must be callable\") # prepare the", "the tuple decides the dimension of the returned tensor. The second field `fcompute`", "# example 3 - mixed-paradigm programming def return_max(x, y): with hcl.if_(x > y):", "mutate_Return(self, node): \"\"\"Replace the Return statement with a Store statement \"\"\" return _make.Store(self.buffer_var,", "ret_ivs = [_IterVar((0, ret.shape[i]), ret.name+\"_i\" + str(i), 0) for i in range(0, len(ret.shape))]", "arguments\") index, _, _ = get_index(shape, indices, 0) st = _make.Store(buffer_var, _make.Cast(dtype, ret[tuple(ret_ivs)]),", "Tensor or None \"\"\" var_list = [i.var for i in lambda_ivs] return_tensor =", "the Return statement with a Store statement \"\"\" return _make.Store(self.buffer_var, _make.Cast(self.dtype, node.value), self.index)", "We do not need to replace the Return statement inside. \"\"\" #pylint: disable=no-self-use", "we calculate each element of the returned tensor. It can contain other HeteroCL", "returned. 
Parameters ---------- name : str The name of the stage lambda_ivs :", "addition) # example 1.3 - imperative function definition @hcl.def_([(), ()]) def addition(x, y):", "#pylint: disable=no-member, redefined-builtin, too-many-arguments, missing-docstring import numbers from collections import OrderedDict from tvm", "axes in lambda arguments\") index, _, _ = get_index(shape, indices, 0) st =", "stage stmt = None if ret is None: # replace all hcl.return_ with", "with a Store statement \"\"\" return _make.Store(self.buffer_var, _make.Cast(self.dtype, node.value), self.index) def process_fcompute(fcompute, shape):", "in range(0, 10): for y in range(0, 10): A[x][y] = x + y", "# example 1.2 - explicit function def addition(x, y): return x+y A =", "decides the dimension of the returned tensor. The second field `fcompute` defines the", "= tensor.shape stage.stmt_stack.append([]) ret = fcompute(*var_list) print(dir(ret)) print(dir(ret.a)) print(dir(ret.b)) stage.lhs_tensors.add(tensor) for t in", "= _make.Store(buffer_var, _make.Cast(dtype, ret[tuple(ret_ivs)]), index) stage.emit(make_for(ret_ivs, st, 0)) stmt = stage.pop_stmt() stage.input_stages.remove(stage) if", "the Store statement index : Expr The index of the Store statement \"\"\"", "KernelDef statement We do not need to replace the Return statement inside. \"\"\"", "for i in range(0, len(ret.shape))] non_reduce_ivs = [] indices = [] rid =", "variable of the Store statement dtype : Type The data type of the", "------- Tensor Examples -------- .. code-block:: python # example 1.1 - anonymous lambda", "make_for(indices, stmt, 0) elif isinstance(ret, (TensorSlice, Scalar, _expr.Expr, numbers.Number)): indices = lambda_ivs index,", "element of the returned tensor. It can contain other HeteroCL APIs, even imperative", "# list of arguments' names nargs = 0 # number of arguments if", "with a Store statement. 
Attributes ---------- buffer_var : Var The buffer variable of", "elif isinstance(ret, (TensorSlice, Scalar, _expr.Expr, numbers.Number)): indices = lambda_ivs index, _, _ =", "shape): \"\"\"Pre-process the fcompute field of an API. \"\"\" # check API correctness", "from tvm.tir import IterVar as _IterVar from util import get_index, get_name, make_for, CastRemover", "self.index = index def mutate_KerenlDef(self, node): \"\"\"Omit the KernelDef statement We do not", "if non_reduce_ivs: stmt = make_for(non_reduce_ivs, stmt, 0) else: raise APIError(\"Unknown return type of", "index : Expr The index of the Store statement \"\"\" def __init__(self, buffer_var,", "10): for y in range(0, 10): A[x][y] = x + y # example", "Tensor): # reduction ret_ivs = [_IterVar((0, ret.shape[i]), ret.name+\"_i\" + str(i), 0) for i", "def addition(x, y): return x+y A = hcl.compute((10, 10), addition) # example 1.3", "all hcl.return_ with Store stmt indices = lambda_ivs index, _, _ = get_index(shape,", "The data type of the placeholder Returns ------- Tensor Examples -------- .. 
code-block::", "shape or the iteration domain dtype : Type, optional The data type of", "= ReplaceReturn(buffer_var, dtype, index).mutate(stmt) stmt = make_for(indices, stmt, 0) elif isinstance(ret, (TensorSlice, Scalar,", "stmt = stage.pop_stmt() stage.input_stages.remove(stage) if non_reduce_ivs: stmt = make_for(non_reduce_ivs, stmt, 0) else: raise", "0) st = _make.Store(buffer_var, _make.Cast(dtype, ret[tuple(ret_ivs)]), index) stage.emit(make_for(ret_ivs, st, 0)) stmt = stage.pop_stmt()", "= CastRemover().mutate(shape) name = get_name(\"compute\", name) # prepare the iteration variables args, nargs", "numbers.Number)): indices = lambda_ivs index, _, _ = get_index(shape, indices, 0) stage.emit(_make.Store(buffer_var, _make.Cast(dtype,", "= hcl.compute((10, 10), addition) # example 2 - undetermined arguments def compute_tanh(X): return", "indices = lambda_ivs index, _, _ = get_index(shape, indices, 0) stage.emit(_make.Store(buffer_var, _make.Cast(dtype, ret),", "10), lambda x, y: x+y) # equivalent code for x in range(0, 10):", "fcompute field of an API. \"\"\" # check API correctness if not callable(fcompute):", "APIError from module import Module ############################################################################## # Helper classes and functions ############################################################################## class", "\"\"\"Omit the KernelDef statement We do not need to replace the Return statement", "new tensor**. The shape must be a tuple. The number of elements in", "name) # prepare the iteration variables args, nargs = process_fcompute(fcompute, shape) lambda_ivs =", "y): hcl.return_(x+y) A = hcl.compute((10, 10), addition) # example 2 - undetermined arguments", "lambda_ivs, fcompute, shape=(), dtype=None, tensor=None, attrs=OrderedDict()): \"\"\"Create a stage and perform the computation.", "computation. If `tensor` is `None`, no tensor is returned. 
Parameters ---------- name :", "stage.input_stages.remove(stage) if non_reduce_ivs: stmt = make_for(non_reduce_ivs, stmt, 0) else: raise APIError(\"Unknown return type", "---------- shape : tuple The shape of the returned tensor fcompute : callable", "Helper classes and functions ############################################################################## class ReplaceReturn(CastRemover): \"\"\"Replace all Return statement with a", "nargs = 0 # number of arguments if isinstance(fcompute, Module): args = fcompute.arg_names", "None \"\"\" var_list = [i.var for i in lambda_ivs] return_tensor = True if", "returns a new tensor tensor = compute_body(name, lambda_ivs, fcompute, shape, dtype, attrs=attrs) return", "however, provides users more programming flexibility. The compute function specifies how we calculate", "the lambda function if exists fcompute : callable The computation rule shape :", "`shape`, which *we do not check*. This, however, provides users more programming flexibility.", "statement. Attributes ---------- buffer_var : Var The buffer variable of the Store statement", "ret[tuple(ret_ivs)]), index) stage.emit(make_for(ret_ivs, st, 0)) stmt = stage.pop_stmt() stage.input_stages.remove(stage) if non_reduce_ivs: stmt =", "the computation rule\") # add attributes to the loop if isinstance(stmt, _stmt.For): stmt", "rule of the returned tensor, which must be callable. The number of arguments", "\"\"\" #pylint: disable=no-self-use return node def mutate_Return(self, node): \"\"\"Replace the Return statement with", "iteration variables args, nargs = process_fcompute(fcompute, shape) lambda_ivs = [_IterVar((0, shape[n]), args[n], 0)", "_make.Cast(dtype, ret[tuple(ret_ivs)]), index) stage.emit(make_for(ret_ivs, st, 0)) stmt = stage.pop_stmt() stage.input_stages.remove(stage) if non_reduce_ivs: stmt", "be callable. The number of arguments should match the dimension defined by `shape`,", "how we calculate each element of the returned tensor. 
It can contain other", "number of arguments should match the dimension defined by `shape`, which *we do", "rule\") # add attributes to the loop if isinstance(stmt, _stmt.For): stmt = _make.For(stmt.loop_var,", "defined by `shape`, which *we do not check*. This, however, provides users more", ": tuple The shape of the returned tensor fcompute : callable The construction", "exists fcompute : callable The computation rule shape : tuple, optional The output", "Type, optional The data type of the output/updated tensor tensor : Tensor, optional", "shape[n]), args[n], 0) for n in range(0, nargs)] # call the helper function", "if isinstance(fcompute, Module): args = fcompute.arg_names nargs = len(args) else: args = list(fcompute.__code__.co_varnames)", "shape) as stage: if not return_tensor: stage.input_stages.add(tensor.last_update) else: tensor = Tensor(shape, stage._dtype, name,", "stmt = make_for(non_reduce_ivs, stmt, 0) else: raise APIError(\"Unknown return type of the computation", "as _IterVar from util import get_index, get_name, make_for, CastRemover from tensor import Scalar,", "= indices + stage.axis_list if return_tensor: tensor._tensor = stage._op return tensor return None", "The construction rule for the returned tensor name : str, optional The name", "y: x+y) # equivalent code for x in range(0, 10): for y in", "= x + y # example 1.2 - explicit function def addition(x, y):", "= stage.pop_stmt() stage.input_stages.remove(stage) if non_reduce_ivs: stmt = make_for(non_reduce_ivs, stmt, 0) else: raise APIError(\"Unknown", "= hcl.compute((10, 10), addition) # example 1.3 - imperative function definition @hcl.def_([(), ()])", "0, stmt.body, list(attrs.keys()), list(attrs.values())) stage.emit(stmt) stage.axis_list = indices + stage.axis_list if return_tensor: tensor._tensor", "A[x][y] = x + y # example 1.2 - explicit function def addition(x,", "make_for(non_reduce_ivs, stmt, 0) else: raise APIError(\"Unknown return type of the computation rule\") #", 
"callable(fcompute): raise APIError(\"The construction rule must be callable\") # prepare the iteration variables", "tensor return None ############################################################################## # APIs exposed to users ############################################################################## def compute(shape, fcompute,", "############################################################################## def compute(shape, fcompute, name=None, dtype=None, attrs=OrderedDict()): \"\"\"Construct a new tensor based on", "no tensor is returned. Parameters ---------- name : str The name of the", "in range(0, len(ret.shape))] non_reduce_ivs = [] indices = [] rid = 0 for", "statement index : Expr The index of the Store statement \"\"\" def __init__(self,", "range(0, 10): for y in range(0, 10): A[x][y] = x + y #", "variables in the lambda function if exists fcompute : callable The computation rule", "if not isinstance(shape, tuple): raise APIError(\"The shape of compute API must be a", "1.1 - anonymous lambda function A = hcl.compute((10, 10), lambda x, y: x+y)", "return args, len(shape) def compute_body(name, lambda_ivs, fcompute, shape=(), dtype=None, tensor=None, attrs=OrderedDict()): \"\"\"Create a", "the returned tensor name : str, optional The name of the returned tensor", "Store statement \"\"\" def __init__(self, buffer_var, dtype, index): self.buffer_var = buffer_var self.dtype =", "compute_body(name, lambda_ivs, fcompute, shape=(), dtype=None, tensor=None, attrs=OrderedDict()): \"\"\"Create a stage and perform the", "y): return x+y A = hcl.compute((10, 10), addition) # example 1.3 - imperative", "index).mutate(stmt) stmt = make_for(indices, stmt, 0) elif isinstance(ret, (TensorSlice, Scalar, _expr.Expr, numbers.Number)): indices", "args, len(shape) def compute_body(name, lambda_ivs, fcompute, shape=(), dtype=None, tensor=None, attrs=OrderedDict()): \"\"\"Create a stage", "dtype, index).mutate(stmt) stmt = make_for(indices, stmt, 0) elif 
isinstance(ret, (TensorSlice, Scalar, _expr.Expr, numbers.Number)):", "\"\"\" # check API correctness if not callable(fcompute): raise APIError(\"The construction rule must", "args = [] # list of arguments' names nargs = 0 # number", "return tensor return None ############################################################################## # APIs exposed to users ############################################################################## def compute(shape,", "callable. The number of arguments should match the dimension defined by `shape`, which", "len(shape) def compute_body(name, lambda_ivs, fcompute, shape=(), dtype=None, tensor=None, attrs=OrderedDict()): \"\"\"Create a stage and", "def compute_body(name, lambda_ivs, fcompute, shape=(), dtype=None, tensor=None, attrs=OrderedDict()): \"\"\"Create a stage and perform", "`fcompute` defines the construction rule of the returned tensor, which must be callable.", "*args: hcl.tanh(X[args])) A = hcl.placeholder((10, 10)) B = hcl.placeholder((10, 10, 10)) tA =", ": Var The buffer variable of the Store statement dtype : Type The", "API must be a tuple\") # properties for the returned tensor shape =", "and functions ############################################################################## class ReplaceReturn(CastRemover): \"\"\"Replace all Return statement with a Store statement.", "computation rule\") # add attributes to the loop if isinstance(stmt, _stmt.For): stmt =", "dtype=None, tensor=None, attrs=OrderedDict()): \"\"\"Create a stage and perform the computation. If `tensor` is", "= get_index(shape, indices, 0) st = _make.Store(buffer_var, _make.Cast(dtype, ret[tuple(ret_ivs)]), index) stage.emit(make_for(ret_ivs, st, 0))", "dtype : Type, optional The data type of the placeholder Returns ------- Tensor", "the output/updated tensor tensor : Tensor, optional The tensor to be updated. 
Create", ": Type, optional The data type of the output/updated tensor tensor : Tensor,", "= [_IterVar((0, ret.shape[i]), ret.name+\"_i\" + str(i), 0) for i in range(0, len(ret.shape))] non_reduce_ivs", "# example 2 - undetermined arguments def compute_tanh(X): return hcl.compute(X.shape, lambda *args: hcl.tanh(X[args]))", "nargs < len(shape): for i in range(nargs, len(shape)): args.append(\"args\" + str(i)) elif nargs", "domain dtype : Type, optional The data type of the output/updated tensor tensor", "x in range(0, 10): for y in range(0, 10): A[x][y] = x +", "\"\"\"Compute APIs in HeteroCL\"\"\" #pylint: disable=no-member, redefined-builtin, too-many-arguments, missing-docstring import numbers from collections", "tuple The shape of the returned tensor fcompute : callable The construction rule", "tensor is None else False with Stage(name, dtype, shape) as stage: if not", "# add attributes to the loop if isinstance(stmt, _stmt.For): stmt = _make.For(stmt.loop_var, stmt.min,", "tensor import Scalar, Tensor, TensorSlice from schedule import Stage from debug import APIError", "1.3 - imperative function definition @hcl.def_([(), ()]) def addition(x, y): hcl.return_(x+y) A =", "the loop if isinstance(stmt, _stmt.For): stmt = _make.For(stmt.loop_var, stmt.min, stmt.extent, 0, 0, stmt.body,", "process_fcompute(fcompute, shape) lambda_ivs = [_IterVar((0, shape[n]), args[n], 0) for n in range(0, nargs)]", "number of reduction axes in lambda arguments\") index, _, _ = get_index(shape, indices,", "loop if isinstance(stmt, _stmt.For): stmt = _make.For(stmt.loop_var, stmt.min, stmt.extent, 0, 0, stmt.body, list(attrs.keys()),", "of the returned tensor. It can contain other HeteroCL APIs, even imperative DSL.", "the returned tensor fcompute : callable The construction rule for the returned tensor", "isinstance(fcompute, Module): args = fcompute.arg_names nargs = len(args) else: args = list(fcompute.__code__.co_varnames) nargs", "tuple decides the dimension of the returned tensor. 
The second field `fcompute` defines", "2 - undetermined arguments def compute_tanh(X): return hcl.compute(X.shape, lambda *args: hcl.tanh(X[args])) A =", "dtype=None, attrs=OrderedDict()): \"\"\"Construct a new tensor based on the shape and the compute", "# prepare the iteration variables args = [] # list of arguments' names", "stmt, 0) else: raise APIError(\"Unknown return type of the computation rule\") # add", "None: # replace all hcl.return_ with Store stmt indices = lambda_ivs index, _,", "tensor is returned. Parameters ---------- name : str The name of the stage", "second field `fcompute` defines the construction rule of the returned tensor, which must", "with hcl.if_(x > y): hcl.return_(x) with hcl.else_: hcl.return_(y) A = hcl.compute((10, 10), return_max)", "# Helper classes and functions ############################################################################## class ReplaceReturn(CastRemover): \"\"\"Replace all Return statement with", "exceeds the number of dimensions\") return args, len(shape) def compute_body(name, lambda_ivs, fcompute, shape=(),", "hcl.compute(X.shape, lambda *args: hcl.tanh(X[args])) A = hcl.placeholder((10, 10)) B = hcl.placeholder((10, 10, 10))", "callable\") # prepare the iteration variables args = [] # list of arguments'", "len(shape)): args.append(\"args\" + str(i)) elif nargs > len(shape): raise APIError(\"The number of arguments", "= tensor.dtype shape = tensor.shape stage.stmt_stack.append([]) ret = fcompute(*var_list) print(dir(ret)) print(dir(ret.a)) print(dir(ret.b)) stage.lhs_tensors.add(tensor)", "y): hcl.return_(x) with hcl.else_: hcl.return_(y) A = hcl.compute((10, 10), return_max) \"\"\" # check", "print(dir(ret.a)) print(dir(ret.b)) stage.lhs_tensors.add(tensor) for t in stage.lhs_tensors: t.last_update = stage stmt = None", "i in range(nargs, len(shape)): args.append(\"args\" + str(i)) elif nargs > len(shape): raise APIError(\"The", "elif nargs > len(shape): raise APIError(\"The number of arguments exceeds the 
number of", "iteration variables args = [] # list of arguments' names nargs = 0", "the placeholder Returns ------- Tensor Examples -------- .. code-block:: python # example 1.1", "############################################################################## # Helper classes and functions ############################################################################## class ReplaceReturn(CastRemover): \"\"\"Replace all Return statement", "------- Tensor or None \"\"\" var_list = [i.var for i in lambda_ivs] return_tensor", "shape=(), dtype=None, tensor=None, attrs=OrderedDict()): \"\"\"Create a stage and perform the computation. If `tensor`", "tensor to be updated. Create a new one if it is `None` Returns", "functions ############################################################################## class ReplaceReturn(CastRemover): \"\"\"Replace all Return statement with a Store statement. Attributes", "True if tensor is None else False with Stage(name, dtype, shape) as stage:", "APIError(\"Unknown return type of the computation rule\") # add attributes to the loop", "It can contain other HeteroCL APIs, even imperative DSL. 
Parameters ---------- shape :", "stmt as _stmt from tvm.tir import IterVar as _IterVar from util import get_index,", "import Stage from debug import APIError from module import Module ############################################################################## # Helper", "API correctness if not callable(fcompute): raise APIError(\"The construction rule must be callable\") #", "or None \"\"\" var_list = [i.var for i in lambda_ivs] return_tensor = True", "returned tensor fcompute : callable The construction rule for the returned tensor name", "y # example 1.2 - explicit function def addition(x, y): return x+y A", "args, nargs = process_fcompute(fcompute, shape) lambda_ivs = [_IterVar((0, shape[n]), args[n], 0) for n", "programming def return_max(x, y): with hcl.if_(x > y): hcl.return_(x) with hcl.else_: hcl.return_(y) A", "i in lambda_ivs] return_tensor = True if tensor is None else False with", "for iv in lambda_ivs: if iv.var.name[0] == \"_\": indices.append(ret_ivs[rid]) rid += 1 else:", "CastRemover().mutate(shape) name = get_name(\"compute\", name) # prepare the iteration variables args, nargs =", "which must be callable. The number of arguments should match the dimension defined", "str, optional The name of the returned tensor dtype : Type, optional The", "from module import Module ############################################################################## # Helper classes and functions ############################################################################## class ReplaceReturn(CastRemover):", "0) for i in range(0, len(ret.shape))] non_reduce_ivs = [] indices = [] rid", "must be callable\") # prepare the iteration variables args = [] # list", "of compute API must be a tuple\") # properties for the returned tensor", "Returns ------- Tensor Examples -------- .. code-block:: python # example 1.1 - anonymous", "in the tuple decides the dimension of the returned tensor. 
The second field", "function if exists fcompute : callable The computation rule shape : tuple, optional", "stmt = None if ret is None: # replace all hcl.return_ with Store", "_make.Cast(dtype, ret), index)) stmt = make_for(indices, stage.pop_stmt(), 0) elif isinstance(ret, Tensor): # reduction", "index): self.buffer_var = buffer_var self.dtype = dtype self.index = index def mutate_KerenlDef(self, node):", "= list(fcompute.__code__.co_varnames) nargs = fcompute.__code__.co_argcount # automatically create argument names if nargs <", "hcl.return_(x+y) A = hcl.compute((10, 10), addition) # example 2 - undetermined arguments def", "import OrderedDict from tvm import expr_hcl as _expr, stmt as _stmt from tvm.tir", "stmt.body, list(attrs.keys()), list(attrs.values())) stage.emit(stmt) stage.axis_list = indices + stage.axis_list if return_tensor: tensor._tensor =", "not need to replace the Return statement inside. \"\"\" #pylint: disable=no-self-use return node", "get_index(shape, indices, 0) stage.emit(_make.Store(buffer_var, _make.Cast(dtype, ret), index)) stmt = make_for(indices, stage.pop_stmt(), 0) elif", "rid += 1 else: indices.append(iv) non_reduce_ivs.append(iv) if rid != len(ret.shape): raise APIError(\"Incorrect number", "None if ret is None: # replace all hcl.return_ with Store stmt indices", "that returns a new tensor tensor = compute_body(name, lambda_ivs, fcompute, shape, dtype, attrs=attrs)", "shape : tuple, optional The output shape or the iteration domain dtype :", "range(0, 10): A[x][y] = x + y # example 1.2 - explicit function", "The second field `fcompute` defines the construction rule of the returned tensor, which", "do not need to replace the Return statement inside. 
\"\"\" #pylint: disable=no-self-use return", "import APIError from module import Module ############################################################################## # Helper classes and functions ##############################################################################", "---------- buffer_var : Var The buffer variable of the Store statement dtype :", "rid = 0 for iv in lambda_ivs: if iv.var.name[0] == \"_\": indices.append(ret_ivs[rid]) rid", "by `shape`, which *we do not check*. This, however, provides users more programming", "of the returned tensor dtype : Type, optional The data type of the", "the stage lambda_ivs : list of IterVar A list contains the iteration variables", "classes and functions ############################################################################## class ReplaceReturn(CastRemover): \"\"\"Replace all Return statement with a Store", "0) elif isinstance(ret, (TensorSlice, Scalar, _expr.Expr, numbers.Number)): indices = lambda_ivs index, _, _", "arguments def compute_tanh(X): return hcl.compute(X.shape, lambda *args: hcl.tanh(X[args])) A = hcl.placeholder((10, 10)) B", "indices = [] rid = 0 for iv in lambda_ivs: if iv.var.name[0] ==", "The number of arguments should match the dimension defined by `shape`, which *we", "debug import APIError from module import Module ############################################################################## # Helper classes and functions", "if it is `None` Returns ------- Tensor or None \"\"\" var_list = [i.var", "optional The name of the returned tensor dtype : Type, optional The data", "else: args = list(fcompute.__code__.co_varnames) nargs = fcompute.__code__.co_argcount # automatically create argument names if", "a new tensor**. The shape must be a tuple. 
The number of elements", "print(dir(ret)) print(dir(ret.a)) print(dir(ret.b)) stage.lhs_tensors.add(tensor) for t in stage.lhs_tensors: t.last_update = stage stmt =", "hcl.else_: hcl.return_(y) A = hcl.compute((10, 10), return_max) \"\"\" # check API correctness if", "len(args) else: args = list(fcompute.__code__.co_varnames) nargs = fcompute.__code__.co_argcount # automatically create argument names", "= True if tensor is None else False with Stage(name, dtype, shape) as", "hcl.return_ with Store stmt indices = lambda_ivs index, _, _ = get_index(shape, indices,", "statement \"\"\" def __init__(self, buffer_var, dtype, index): self.buffer_var = buffer_var self.dtype = dtype", "rule for the returned tensor name : str, optional The name of the", "class ReplaceReturn(CastRemover): \"\"\"Replace all Return statement with a Store statement. Attributes ---------- buffer_var", "function A = hcl.compute((10, 10), lambda x, y: x+y) # equivalent code for", "of reduction axes in lambda arguments\") index, _, _ = get_index(shape, indices, 0)", "statement dtype : Type The data type of the Store statement index :", "or the iteration domain dtype : Type, optional The data type of the", "stage and perform the computation. If `tensor` is `None`, no tensor is returned.", "compute_tanh(B) # example 3 - mixed-paradigm programming def return_max(x, y): with hcl.if_(x >", "for t in stage.lhs_tensors: t.last_update = stage stmt = None if ret is", "the shape and the compute function. The API **returns a new tensor**. The", "A = hcl.compute((10, 10), return_max) \"\"\" # check API correctness if not isinstance(shape,", "Tensor(shape, stage._dtype, name, stage._buf) buffer_var = tensor._buf.data dtype = tensor.dtype shape = tensor.shape", "#pylint: disable=no-self-use return node def mutate_Return(self, node): \"\"\"Replace the Return statement with a", "ret is None: # replace all hcl.return_ with Store stmt indices = lambda_ivs", "be a tuple. 
The number of elements in the tuple decides the dimension", "fcompute(*var_list) print(dir(ret)) print(dir(ret.a)) print(dir(ret.b)) stage.lhs_tensors.add(tensor) for t in stage.lhs_tensors: t.last_update = stage stmt", "############################################################################## class ReplaceReturn(CastRemover): \"\"\"Replace all Return statement with a Store statement. Attributes ----------", ": Expr The index of the Store statement \"\"\" def __init__(self, buffer_var, dtype,", "Type The data type of the Store statement index : Expr The index", "The index of the Store statement \"\"\" def __init__(self, buffer_var, dtype, index): self.buffer_var", "\"\"\"Create a stage and perform the computation. If `tensor` is `None`, no tensor", "Create a new one if it is `None` Returns ------- Tensor or None", "== \"_\": indices.append(ret_ivs[rid]) rid += 1 else: indices.append(iv) non_reduce_ivs.append(iv) if rid != len(ret.shape):", "contains the iteration variables in the lambda function if exists fcompute : callable", "\"\"\"Construct a new tensor based on the shape and the compute function. The", "the Store statement \"\"\" def __init__(self, buffer_var, dtype, index): self.buffer_var = buffer_var self.dtype", "inside. \"\"\" #pylint: disable=no-self-use return node def mutate_Return(self, node): \"\"\"Replace the Return statement", "returned tensor dtype : Type, optional The data type of the placeholder Returns", "name = get_name(\"compute\", name) # prepare the iteration variables args, nargs = process_fcompute(fcompute,", "args.append(\"args\" + str(i)) elif nargs > len(shape): raise APIError(\"The number of arguments exceeds", "Parameters ---------- name : str The name of the stage lambda_ivs : list", "APIError(\"The number of arguments exceeds the number of dimensions\") return args, len(shape) def", "**returns a new tensor**. The shape must be a tuple. The number of", "should match the dimension defined by `shape`, which *we do not check*. 
This,", "dtype, index): self.buffer_var = buffer_var self.dtype = dtype self.index = index def mutate_KerenlDef(self,", "Store statement. Attributes ---------- buffer_var : Var The buffer variable of the Store", "rid != len(ret.shape): raise APIError(\"Incorrect number of reduction axes in lambda arguments\") index,", "not isinstance(shape, tuple): raise APIError(\"The shape of compute API must be a tuple\")", "- anonymous lambda function A = hcl.compute((10, 10), lambda x, y: x+y) #", "tuple, optional The output shape or the iteration domain dtype : Type, optional", "list(fcompute.__code__.co_varnames) nargs = fcompute.__code__.co_argcount # automatically create argument names if nargs < len(shape):", "= get_name(\"compute\", name) # prepare the iteration variables args, nargs = process_fcompute(fcompute, shape)", "number of arguments exceeds the number of dimensions\") return args, len(shape) def compute_body(name,", "fcompute, shape=(), dtype=None, tensor=None, attrs=OrderedDict()): \"\"\"Create a stage and perform the computation. 
If", "in the lambda function if exists fcompute : callable The computation rule shape", "- imperative function definition @hcl.def_([(), ()]) def addition(x, y): hcl.return_(x+y) A = hcl.compute((10,", "type of the computation rule\") # add attributes to the loop if isinstance(stmt,", "# number of arguments if isinstance(fcompute, Module): args = fcompute.arg_names nargs = len(args)", "new one if it is `None` Returns ------- Tensor or None \"\"\" var_list", "def compute_tanh(X): return hcl.compute(X.shape, lambda *args: hcl.tanh(X[args])) A = hcl.placeholder((10, 10)) B =", ": str The name of the stage lambda_ivs : list of IterVar A", "schedule import Stage from debug import APIError from module import Module ############################################################################## #", "Scalar, _expr.Expr, numbers.Number)): indices = lambda_ivs index, _, _ = get_index(shape, indices, 0)", "non_reduce_ivs = [] indices = [] rid = 0 for iv in lambda_ivs:", "of arguments exceeds the number of dimensions\") return args, len(shape) def compute_body(name, lambda_ivs,", "hcl.compute((10, 10), return_max) \"\"\" # check API correctness if not isinstance(shape, tuple): raise", "Type, optional The data type of the placeholder Returns ------- Tensor Examples --------", "type of the Store statement index : Expr The index of the Store", "compute function. The API **returns a new tensor**. The shape must be a", "based on the shape and the compute function. The API **returns a new", "lambda_ivs] return_tensor = True if tensor is None else False with Stage(name, dtype,", "stage: if not return_tensor: stage.input_stages.add(tensor.last_update) else: tensor = Tensor(shape, stage._dtype, name, stage._buf) buffer_var", "arguments should match the dimension defined by `shape`, which *we do not check*.", "the dimension defined by `shape`, which *we do not check*. 
This, however, provides", "stage.lhs_tensors: t.last_update = stage stmt = None if ret is None: # replace", "stage lambda_ivs : list of IterVar A list contains the iteration variables in", "\"\"\" def __init__(self, buffer_var, dtype, index): self.buffer_var = buffer_var self.dtype = dtype self.index", "else: indices.append(iv) non_reduce_ivs.append(iv) if rid != len(ret.shape): raise APIError(\"Incorrect number of reduction axes", "example 1.2 - explicit function def addition(x, y): return x+y A = hcl.compute((10,", "10, 10)) tA = compute_tanh(A) tB = compute_tanh(B) # example 3 - mixed-paradigm", "callable The construction rule for the returned tensor name : str, optional The", "None ############################################################################## # APIs exposed to users ############################################################################## def compute(shape, fcompute, name=None, dtype=None,", "return_max) \"\"\" # check API correctness if not isinstance(shape, tuple): raise APIError(\"The shape", "if rid != len(ret.shape): raise APIError(\"Incorrect number of reduction axes in lambda arguments\")", "fcompute.__code__.co_argcount # automatically create argument names if nargs < len(shape): for i in", "dtype : Type The data type of the Store statement index : Expr", "= lambda_ivs index, _, _ = get_index(shape, indices, 0) stmt = stage.pop_stmt() stmt", "`tensor` is `None`, no tensor is returned. Parameters ---------- name : str The", "Store stmt indices = lambda_ivs index, _, _ = get_index(shape, indices, 0) stmt", "get_index, get_name, make_for, CastRemover from tensor import Scalar, Tensor, TensorSlice from schedule import", "lambda_ivs index, _, _ = get_index(shape, indices, 0) stmt = stage.pop_stmt() stmt =", "stage.pop_stmt(), 0) elif isinstance(ret, Tensor): # reduction ret_ivs = [_IterVar((0, ret.shape[i]), ret.name+\"_i\" +", "tensor tensor : Tensor, optional The tensor to be updated. 
Create a new", "construction rule for the returned tensor name : str, optional The name of", "(TensorSlice, Scalar, _expr.Expr, numbers.Number)): indices = lambda_ivs index, _, _ = get_index(shape, indices,", "and the compute function. The API **returns a new tensor**. The shape must", "all Return statement with a Store statement. Attributes ---------- buffer_var : Var The", "compute API must be a tuple\") # properties for the returned tensor shape", "construction rule must be callable\") # prepare the iteration variables args = []", "of the stage lambda_ivs : list of IterVar A list contains the iteration", "range(nargs, len(shape)): args.append(\"args\" + str(i)) elif nargs > len(shape): raise APIError(\"The number of", "tensor.dtype shape = tensor.shape stage.stmt_stack.append([]) ret = fcompute(*var_list) print(dir(ret)) print(dir(ret.a)) print(dir(ret.b)) stage.lhs_tensors.add(tensor) for", "A = hcl.compute((10, 10), addition) # example 2 - undetermined arguments def compute_tanh(X):", "the returned tensor shape = CastRemover().mutate(shape) name = get_name(\"compute\", name) # prepare the", "indices + stage.axis_list if return_tensor: tensor._tensor = stage._op return tensor return None ##############################################################################", "shape : tuple The shape of the returned tensor fcompute : callable The", "A list contains the iteration variables in the lambda function if exists fcompute", "code-block:: python # example 1.1 - anonymous lambda function A = hcl.compute((10, 10),", "users ############################################################################## def compute(shape, fcompute, name=None, dtype=None, attrs=OrderedDict()): \"\"\"Construct a new tensor based", "stage.pop_stmt() stmt = ReplaceReturn(buffer_var, dtype, index).mutate(stmt) stmt = make_for(indices, stmt, 0) elif isinstance(ret,", "= 0 for iv in lambda_ivs: if iv.var.name[0] == \"_\": indices.append(ret_ivs[rid]) rid +=", "= buffer_var self.dtype = dtype 
self.index = index def mutate_KerenlDef(self, node): \"\"\"Omit the", "len(shape): for i in range(nargs, len(shape)): args.append(\"args\" + str(i)) elif nargs > len(shape):", "_ = get_index(shape, indices, 0) st = _make.Store(buffer_var, _make.Cast(dtype, ret[tuple(ret_ivs)]), index) stage.emit(make_for(ret_ivs, st,", "the dimension of the returned tensor. The second field `fcompute` defines the construction", "for n in range(0, nargs)] # call the helper function that returns a", "isinstance(stmt, _stmt.For): stmt = _make.For(stmt.loop_var, stmt.min, stmt.extent, 0, 0, stmt.body, list(attrs.keys()), list(attrs.values())) stage.emit(stmt)", "B = hcl.placeholder((10, 10, 10)) tA = compute_tanh(A) tB = compute_tanh(B) # example", "is None else False with Stage(name, dtype, shape) as stage: if not return_tensor:", "lambda arguments\") index, _, _ = get_index(shape, indices, 0) st = _make.Store(buffer_var, _make.Cast(dtype,", "args[n], 0) for n in range(0, nargs)] # call the helper function that", "the Return statement inside. \"\"\" #pylint: disable=no-self-use return node def mutate_Return(self, node): \"\"\"Replace", "_ = get_index(shape, indices, 0) stage.emit(_make.Store(buffer_var, _make.Cast(dtype, ret), index)) stmt = make_for(indices, stage.pop_stmt(),", "= get_index(shape, indices, 0) stage.emit(_make.Store(buffer_var, _make.Cast(dtype, ret), index)) stmt = make_for(indices, stage.pop_stmt(), 0)", "provides users more programming flexibility. The compute function specifies how we calculate each", "API **returns a new tensor**. The shape must be a tuple. 
The number", "collections import OrderedDict from tvm import expr_hcl as _expr, stmt as _stmt from", "get_index(shape, indices, 0) st = _make.Store(buffer_var, _make.Cast(dtype, ret[tuple(ret_ivs)]), index) stage.emit(make_for(ret_ivs, st, 0)) stmt", "reduction ret_ivs = [_IterVar((0, ret.shape[i]), ret.name+\"_i\" + str(i), 0) for i in range(0,", "The name of the returned tensor dtype : Type, optional The data type", "must be a tuple. The number of elements in the tuple decides the", "process_fcompute(fcompute, shape): \"\"\"Pre-process the fcompute field of an API. \"\"\" # check API", "flexibility. The compute function specifies how we calculate each element of the returned", "raise APIError(\"The shape of compute API must be a tuple\") # properties for", "import IterVar as _IterVar from util import get_index, get_name, make_for, CastRemover from tensor", "the iteration variables args = [] # list of arguments' names nargs =", ": tuple, optional The output shape or the iteration domain dtype : Type,", "`None` Returns ------- Tensor or None \"\"\" var_list = [i.var for i in", "elements in the tuple decides the dimension of the returned tensor. The second", "names if nargs < len(shape): for i in range(nargs, len(shape)): args.append(\"args\" + str(i))", "= len(args) else: args = list(fcompute.__code__.co_varnames) nargs = fcompute.__code__.co_argcount # automatically create argument", "= [_IterVar((0, shape[n]), args[n], 0) for n in range(0, nargs)] # call the", "check API correctness if not isinstance(shape, tuple): raise APIError(\"The shape of compute API", "stmt, 0) elif isinstance(ret, (TensorSlice, Scalar, _expr.Expr, numbers.Number)): indices = lambda_ivs index, _,", "1 else: indices.append(iv) non_reduce_ivs.append(iv) if rid != len(ret.shape): raise APIError(\"Incorrect number of reduction", "shape and the compute function. The API **returns a new tensor**. The shape", "function. The API **returns a new tensor**. The shape must be a tuple.", "a tuple. 
The number of elements in the tuple decides the dimension of", "example 1.1 - anonymous lambda function A = hcl.compute((10, 10), lambda x, y:", "name : str The name of the stage lambda_ivs : list of IterVar", "tA = compute_tanh(A) tB = compute_tanh(B) # example 3 - mixed-paradigm programming def", "TensorSlice from schedule import Stage from debug import APIError from module import Module", "for x in range(0, 10): for y in range(0, 10): A[x][y] = x", "calculate each element of the returned tensor. It can contain other HeteroCL APIs,", "# equivalent code for x in range(0, 10): for y in range(0, 10):", "stage.emit(_make.Store(buffer_var, _make.Cast(dtype, ret), index)) stmt = make_for(indices, stage.pop_stmt(), 0) elif isinstance(ret, Tensor): #", ".. code-block:: python # example 1.1 - anonymous lambda function A = hcl.compute((10,", "APIs in HeteroCL\"\"\" #pylint: disable=no-member, redefined-builtin, too-many-arguments, missing-docstring import numbers from collections import", "number of arguments if isinstance(fcompute, Module): args = fcompute.arg_names nargs = len(args) else:", "list of arguments' names nargs = 0 # number of arguments if isinstance(fcompute,", "# APIs exposed to users ############################################################################## def compute(shape, fcompute, name=None, dtype=None, attrs=OrderedDict()): \"\"\"Construct", "= None if ret is None: # replace all hcl.return_ with Store stmt", "############################################################################## # APIs exposed to users ############################################################################## def compute(shape, fcompute, name=None, dtype=None, attrs=OrderedDict()):", "The tensor to be updated. 
Create a new one if it is `None`", "_make.Store(buffer_var, _make.Cast(dtype, ret[tuple(ret_ivs)]), index) stage.emit(make_for(ret_ivs, st, 0)) stmt = stage.pop_stmt() stage.input_stages.remove(stage) if non_reduce_ivs:", "optional The data type of the output/updated tensor tensor : Tensor, optional The", "return x+y A = hcl.compute((10, 10), addition) # example 1.3 - imperative function", "addition(x, y): return x+y A = hcl.compute((10, 10), addition) # example 1.3 -", "_ = get_index(shape, indices, 0) stmt = stage.pop_stmt() stmt = ReplaceReturn(buffer_var, dtype, index).mutate(stmt)", "= hcl.compute((10, 10), lambda x, y: x+y) # equivalent code for x in", "indices, 0) stmt = stage.pop_stmt() stmt = ReplaceReturn(buffer_var, dtype, index).mutate(stmt) stmt = make_for(indices,", "@hcl.def_([(), ()]) def addition(x, y): hcl.return_(x+y) A = hcl.compute((10, 10), addition) # example", "hcl.return_(x) with hcl.else_: hcl.return_(y) A = hcl.compute((10, 10), return_max) \"\"\" # check API", "from debug import APIError from module import Module ############################################################################## # Helper classes and", "of elements in the tuple decides the dimension of the returned tensor. 
The", "statement \"\"\" return _make.Store(self.buffer_var, _make.Cast(self.dtype, node.value), self.index) def process_fcompute(fcompute, shape): \"\"\"Pre-process the fcompute", "Returns ------- Tensor or None \"\"\" var_list = [i.var for i in lambda_ivs]", "The shape of the returned tensor fcompute : callable The construction rule for", "statement with a Store statement \"\"\" return _make.Store(self.buffer_var, _make.Cast(self.dtype, node.value), self.index) def process_fcompute(fcompute,", "isinstance(ret, (TensorSlice, Scalar, _expr.Expr, numbers.Number)): indices = lambda_ivs index, _, _ = get_index(shape,", "fcompute, name=None, dtype=None, attrs=OrderedDict()): \"\"\"Construct a new tensor based on the shape and", "[i.var for i in lambda_ivs] return_tensor = True if tensor is None else", "callable The computation rule shape : tuple, optional The output shape or the", "raise APIError(\"Incorrect number of reduction axes in lambda arguments\") index, _, _ =", "= get_index(shape, indices, 0) stmt = stage.pop_stmt() stmt = ReplaceReturn(buffer_var, dtype, index).mutate(stmt) stmt", "of arguments should match the dimension defined by `shape`, which *we do not", "missing-docstring import numbers from collections import OrderedDict from tvm import expr_hcl as _expr,", "def __init__(self, buffer_var, dtype, index): self.buffer_var = buffer_var self.dtype = dtype self.index =", "stmt = stage.pop_stmt() stmt = ReplaceReturn(buffer_var, dtype, index).mutate(stmt) stmt = make_for(indices, stmt, 0)", "+ str(i), 0) for i in range(0, len(ret.shape))] non_reduce_ivs = [] indices =", "index def mutate_KerenlDef(self, node): \"\"\"Omit the KernelDef statement We do not need to", "nargs = len(args) else: args = list(fcompute.__code__.co_varnames) nargs = fcompute.__code__.co_argcount # automatically create", "_stmt.For): stmt = _make.For(stmt.loop_var, stmt.min, stmt.extent, 0, 0, stmt.body, list(attrs.keys()), list(attrs.values())) stage.emit(stmt) stage.axis_list", "nargs 
= process_fcompute(fcompute, shape) lambda_ivs = [_IterVar((0, shape[n]), args[n], 0) for n in", "in range(0, nargs)] # call the helper function that returns a new tensor", "in HeteroCL\"\"\" #pylint: disable=no-member, redefined-builtin, too-many-arguments, missing-docstring import numbers from collections import OrderedDict", "\"\"\" return _make.Store(self.buffer_var, _make.Cast(self.dtype, node.value), self.index) def process_fcompute(fcompute, shape): \"\"\"Pre-process the fcompute field", "APIs exposed to users ############################################################################## def compute(shape, fcompute, name=None, dtype=None, attrs=OrderedDict()): \"\"\"Construct a", "statement with a Store statement. Attributes ---------- buffer_var : Var The buffer variable", "fcompute : callable The construction rule for the returned tensor name : str,", "stmt indices = lambda_ivs index, _, _ = get_index(shape, indices, 0) stmt =", "return_max(x, y): with hcl.if_(x > y): hcl.return_(x) with hcl.else_: hcl.return_(y) A = hcl.compute((10,", "def process_fcompute(fcompute, shape): \"\"\"Pre-process the fcompute field of an API. 
\"\"\" # check", "function definition @hcl.def_([(), ()]) def addition(x, y): hcl.return_(x+y) A = hcl.compute((10, 10), addition)", "= fcompute(*var_list) print(dir(ret)) print(dir(ret.a)) print(dir(ret.b)) stage.lhs_tensors.add(tensor) for t in stage.lhs_tensors: t.last_update = stage", "in range(nargs, len(shape)): args.append(\"args\" + str(i)) elif nargs > len(shape): raise APIError(\"The number", "0) stage.emit(_make.Store(buffer_var, _make.Cast(dtype, ret), index)) stmt = make_for(indices, stage.pop_stmt(), 0) elif isinstance(ret, Tensor):", "list(attrs.keys()), list(attrs.values())) stage.emit(stmt) stage.axis_list = indices + stage.axis_list if return_tensor: tensor._tensor = stage._op", "0) for n in range(0, nargs)] # call the helper function that returns", "exposed to users ############################################################################## def compute(shape, fcompute, name=None, dtype=None, attrs=OrderedDict()): \"\"\"Construct a new", "more programming flexibility. The compute function specifies how we calculate each element of", "number of elements in the tuple decides the dimension of the returned tensor.", "= [] # list of arguments' names nargs = 0 # number of", "node.value), self.index) def process_fcompute(fcompute, shape): \"\"\"Pre-process the fcompute field of an API. \"\"\"", "buffer_var : Var The buffer variable of the Store statement dtype : Type", "tvm import expr_hcl as _expr, stmt as _stmt from tvm.tir import IterVar as", "+ stage.axis_list if return_tensor: tensor._tensor = stage._op return tensor return None ############################################################################## #", "output/updated tensor tensor : Tensor, optional The tensor to be updated. 
Create a", "Store statement \"\"\" return _make.Store(self.buffer_var, _make.Cast(self.dtype, node.value), self.index) def process_fcompute(fcompute, shape): \"\"\"Pre-process the", "the iteration variables in the lambda function if exists fcompute : callable The", "new tensor based on the shape and the compute function. The API **returns", "str The name of the stage lambda_ivs : list of IterVar A list", "CastRemover from tensor import Scalar, Tensor, TensorSlice from schedule import Stage from debug", "of the Store statement dtype : Type The data type of the Store", "example 3 - mixed-paradigm programming def return_max(x, y): with hcl.if_(x > y): hcl.return_(x)", "Expr The index of the Store statement \"\"\" def __init__(self, buffer_var, dtype, index):", "range(0, nargs)] # call the helper function that returns a new tensor tensor", "the construction rule of the returned tensor, which must be callable. The number", "def mutate_KerenlDef(self, node): \"\"\"Omit the KernelDef statement We do not need to replace", "= hcl.compute((10, 10), return_max) \"\"\" # check API correctness if not isinstance(shape, tuple):", "= stage.pop_stmt() stmt = ReplaceReturn(buffer_var, dtype, index).mutate(stmt) stmt = make_for(indices, stmt, 0) elif", "for the returned tensor shape = CastRemover().mutate(shape) name = get_name(\"compute\", name) # prepare", "correctness if not callable(fcompute): raise APIError(\"The construction rule must be callable\") # prepare", "for i in lambda_ivs] return_tensor = True if tensor is None else False", "The number of elements in the tuple decides the dimension of the returned", "nargs > len(shape): raise APIError(\"The number of arguments exceeds the number of dimensions\")", "from util import get_index, get_name, make_for, CastRemover from tensor import Scalar, Tensor, TensorSlice", "> y): hcl.return_(x) with hcl.else_: hcl.return_(y) A = hcl.compute((10, 10), return_max) \"\"\" #", "the Store statement dtype : Type The data type of the Store 
statement", "compute function specifies how we calculate each element of the returned tensor. It", "= compute_tanh(A) tB = compute_tanh(B) # example 3 - mixed-paradigm programming def return_max(x,", "it is `None` Returns ------- Tensor or None \"\"\" var_list = [i.var for", "field `fcompute` defines the construction rule of the returned tensor, which must be", "IterVar as _IterVar from util import get_index, get_name, make_for, CastRemover from tensor import", "of the computation rule\") # add attributes to the loop if isinstance(stmt, _stmt.For):", "[] # list of arguments' names nargs = 0 # number of arguments", "names nargs = 0 # number of arguments if isinstance(fcompute, Module): args =", "stmt = make_for(indices, stmt, 0) elif isinstance(ret, (TensorSlice, Scalar, _expr.Expr, numbers.Number)): indices =", "stmt = _make.For(stmt.loop_var, stmt.min, stmt.extent, 0, 0, stmt.body, list(attrs.keys()), list(attrs.values())) stage.emit(stmt) stage.axis_list =", "hcl.placeholder((10, 10)) B = hcl.placeholder((10, 10, 10)) tA = compute_tanh(A) tB = compute_tanh(B)", "x+y) # equivalent code for x in range(0, 10): for y in range(0,", "__init__(self, buffer_var, dtype, index): self.buffer_var = buffer_var self.dtype = dtype self.index = index", "check API correctness if not callable(fcompute): raise APIError(\"The construction rule must be callable\")", "The buffer variable of the Store statement dtype : Type The data type", "import Scalar, Tensor, TensorSlice from schedule import Stage from debug import APIError from", "# example 1.1 - anonymous lambda function A = hcl.compute((10, 10), lambda x,", "be updated. Create a new one if it is `None` Returns ------- Tensor", "undetermined arguments def compute_tanh(X): return hcl.compute(X.shape, lambda *args: hcl.tanh(X[args])) A = hcl.placeholder((10, 10))", "\"\"\"Replace all Return statement with a Store statement. 
Attributes ---------- buffer_var : Var", "equivalent code for x in range(0, 10): for y in range(0, 10): A[x][y]", "add attributes to the loop if isinstance(stmt, _stmt.For): stmt = _make.For(stmt.loop_var, stmt.min, stmt.extent,", "len(shape): raise APIError(\"The number of arguments exceeds the number of dimensions\") return args,", "= lambda_ivs index, _, _ = get_index(shape, indices, 0) stage.emit(_make.Store(buffer_var, _make.Cast(dtype, ret), index))", "---------- name : str The name of the stage lambda_ivs : list of", "of the Store statement \"\"\" def __init__(self, buffer_var, dtype, index): self.buffer_var = buffer_var", "# check API correctness if not callable(fcompute): raise APIError(\"The construction rule must be", "hcl.if_(x > y): hcl.return_(x) with hcl.else_: hcl.return_(y) A = hcl.compute((10, 10), return_max) \"\"\"", "_make.Cast(self.dtype, node.value), self.index) def process_fcompute(fcompute, shape): \"\"\"Pre-process the fcompute field of an API.", "\"\"\" # check API correctness if not isinstance(shape, tuple): raise APIError(\"The shape of", "i in range(0, len(ret.shape))] non_reduce_ivs = [] indices = [] rid = 0", "nargs)] # call the helper function that returns a new tensor tensor =", "variables args = [] # list of arguments' names nargs = 0 #", "of the returned tensor, which must be callable. The number of arguments should", "y in range(0, 10): A[x][y] = x + y # example 1.2 -", "hcl.tanh(X[args])) A = hcl.placeholder((10, 10)) B = hcl.placeholder((10, 10, 10)) tA = compute_tanh(A)", "buffer variable of the Store statement dtype : Type The data type of", "make_for, CastRemover from tensor import Scalar, Tensor, TensorSlice from schedule import Stage from", "= Tensor(shape, stage._dtype, name, stage._buf) buffer_var = tensor._buf.data dtype = tensor.dtype shape =", "tensor. The second field `fcompute` defines the construction rule of the returned tensor,", "programming flexibility. 
The compute function specifies how we calculate each element of the", "tuple): raise APIError(\"The shape of compute API must be a tuple\") # properties", "\"\"\" var_list = [i.var for i in lambda_ivs] return_tensor = True if tensor", "indices, 0) st = _make.Store(buffer_var, _make.Cast(dtype, ret[tuple(ret_ivs)]), index) stage.emit(make_for(ret_ivs, st, 0)) stmt =", "the helper function that returns a new tensor tensor = compute_body(name, lambda_ivs, fcompute,", "Module): args = fcompute.arg_names nargs = len(args) else: args = list(fcompute.__code__.co_varnames) nargs =", "ReplaceReturn(CastRemover): \"\"\"Replace all Return statement with a Store statement. Attributes ---------- buffer_var :", "expr_hcl as _expr, stmt as _stmt from tvm.tir import IterVar as _IterVar from", "_IterVar from util import get_index, get_name, make_for, CastRemover from tensor import Scalar, Tensor,", "Stage from debug import APIError from module import Module ############################################################################## # Helper classes", "0) stmt = stage.pop_stmt() stmt = ReplaceReturn(buffer_var, dtype, index).mutate(stmt) stmt = make_for(indices, stmt,", "\"_\": indices.append(ret_ivs[rid]) rid += 1 else: indices.append(iv) non_reduce_ivs.append(iv) if rid != len(ret.shape): raise", "`None`, no tensor is returned. Parameters ---------- name : str The name of", "10), return_max) \"\"\" # check API correctness if not isinstance(shape, tuple): raise APIError(\"The", "statement inside. \"\"\" #pylint: disable=no-self-use return node def mutate_Return(self, node): \"\"\"Replace the Return", "1.2 - explicit function def addition(x, y): return x+y A = hcl.compute((10, 10),", "stage.input_stages.add(tensor.last_update) else: tensor = Tensor(shape, stage._dtype, name, stage._buf) buffer_var = tensor._buf.data dtype =", "stage.lhs_tensors.add(tensor) for t in stage.lhs_tensors: t.last_update = stage stmt = None if ret", "replace the Return statement inside. 
\"\"\" #pylint: disable=no-self-use return node def mutate_Return(self, node):", "tvm.tir import IterVar as _IterVar from util import get_index, get_name, make_for, CastRemover from", "as _stmt from tvm.tir import IterVar as _IterVar from util import get_index, get_name,", "import numbers from collections import OrderedDict from tvm import expr_hcl as _expr, stmt", "type of the output/updated tensor tensor : Tensor, optional The tensor to be", "x, y: x+y) # equivalent code for x in range(0, 10): for y", "correctness if not isinstance(shape, tuple): raise APIError(\"The shape of compute API must be", "to replace the Return statement inside. \"\"\" #pylint: disable=no-self-use return node def mutate_Return(self,", "= stage stmt = None if ret is None: # replace all hcl.return_", "stage.emit(make_for(ret_ivs, st, 0)) stmt = stage.pop_stmt() stage.input_stages.remove(stage) if non_reduce_ivs: stmt = make_for(non_reduce_ivs, stmt,", "return hcl.compute(X.shape, lambda *args: hcl.tanh(X[args])) A = hcl.placeholder((10, 10)) B = hcl.placeholder((10, 10,", "= make_for(non_reduce_ivs, stmt, 0) else: raise APIError(\"Unknown return type of the computation rule\")", "if ret is None: # replace all hcl.return_ with Store stmt indices =", "stage._op return tensor return None ############################################################################## # APIs exposed to users ############################################################################## def", "hcl.compute((10, 10), addition) # example 2 - undetermined arguments def compute_tanh(X): return hcl.compute(X.shape,", "APIError(\"Incorrect number of reduction axes in lambda arguments\") index, _, _ = get_index(shape,", "stmt.extent, 0, 0, stmt.body, list(attrs.keys()), list(attrs.values())) stage.emit(stmt) stage.axis_list = indices + stage.axis_list if", "numbers from collections import OrderedDict from tvm import expr_hcl as _expr, stmt as", "= dtype self.index = index def mutate_KerenlDef(self, node): \"\"\"Omit the 
KernelDef statement We", "stage.emit(stmt) stage.axis_list = indices + stage.axis_list if return_tensor: tensor._tensor = stage._op return tensor", "ret.shape[i]), ret.name+\"_i\" + str(i), 0) for i in range(0, len(ret.shape))] non_reduce_ivs = []", "optional The data type of the placeholder Returns ------- Tensor Examples -------- ..", "do not check*. This, however, provides users more programming flexibility. The compute function", "return_tensor: stage.input_stages.add(tensor.last_update) else: tensor = Tensor(shape, stage._dtype, name, stage._buf) buffer_var = tensor._buf.data dtype", "of an API. \"\"\" # check API correctness if not callable(fcompute): raise APIError(\"The", "st, 0)) stmt = stage.pop_stmt() stage.input_stages.remove(stage) if non_reduce_ivs: stmt = make_for(non_reduce_ivs, stmt, 0)", "lambda_ivs index, _, _ = get_index(shape, indices, 0) stage.emit(_make.Store(buffer_var, _make.Cast(dtype, ret), index)) stmt", "in lambda_ivs] return_tensor = True if tensor is None else False with Stage(name,", "shape = tensor.shape stage.stmt_stack.append([]) ret = fcompute(*var_list) print(dir(ret)) print(dir(ret.a)) print(dir(ret.b)) stage.lhs_tensors.add(tensor) for t", "shape) lambda_ivs = [_IterVar((0, shape[n]), args[n], 0) for n in range(0, nargs)] #", "anonymous lambda function A = hcl.compute((10, 10), lambda x, y: x+y) # equivalent", ": callable The construction rule for the returned tensor name : str, optional", "returned tensor. 
The second field `fcompute` defines the construction rule of the returned", "return _make.Store(self.buffer_var, _make.Cast(self.dtype, node.value), self.index) def process_fcompute(fcompute, shape): \"\"\"Pre-process the fcompute field of", "= index def mutate_KerenlDef(self, node): \"\"\"Omit the KernelDef statement We do not need", "list(attrs.values())) stage.emit(stmt) stage.axis_list = indices + stage.axis_list if return_tensor: tensor._tensor = stage._op return", "name, stage._buf) buffer_var = tensor._buf.data dtype = tensor.dtype shape = tensor.shape stage.stmt_stack.append([]) ret", "- undetermined arguments def compute_tanh(X): return hcl.compute(X.shape, lambda *args: hcl.tanh(X[args])) A = hcl.placeholder((10,", "- explicit function def addition(x, y): return x+y A = hcl.compute((10, 10), addition)", "of the returned tensor fcompute : callable The construction rule for the returned", "The compute function specifies how we calculate each element of the returned tensor.", "API correctness if not isinstance(shape, tuple): raise APIError(\"The shape of compute API must", "computation rule shape : tuple, optional The output shape or the iteration domain", ": callable The computation rule shape : tuple, optional The output shape or", "output shape or the iteration domain dtype : Type, optional The data type", "import expr_hcl as _expr, stmt as _stmt from tvm.tir import IterVar as _IterVar", "optional The tensor to be updated. 
Create a new one if it is", "as stage: if not return_tensor: stage.input_stages.add(tensor.last_update) else: tensor = Tensor(shape, stage._dtype, name, stage._buf)", "stmt = ReplaceReturn(buffer_var, dtype, index).mutate(stmt) stmt = make_for(indices, stmt, 0) elif isinstance(ret, (TensorSlice,", "if iv.var.name[0] == \"_\": indices.append(ret_ivs[rid]) rid += 1 else: indices.append(iv) non_reduce_ivs.append(iv) if rid", "< len(shape): for i in range(nargs, len(shape)): args.append(\"args\" + str(i)) elif nargs >", "list contains the iteration variables in the lambda function if exists fcompute :", "to users ############################################################################## def compute(shape, fcompute, name=None, dtype=None, attrs=OrderedDict()): \"\"\"Construct a new tensor", "10)) B = hcl.placeholder((10, 10, 10)) tA = compute_tanh(A) tB = compute_tanh(B) #", "# properties for the returned tensor shape = CastRemover().mutate(shape) name = get_name(\"compute\", name)", "get_name(\"compute\", name) # prepare the iteration variables args, nargs = process_fcompute(fcompute, shape) lambda_ivs", "iteration domain dtype : Type, optional The data type of the output/updated tensor", "a stage and perform the computation. 
If `tensor` is `None`, no tensor is", "def compute(shape, fcompute, name=None, dtype=None, attrs=OrderedDict()): \"\"\"Construct a new tensor based on the", ": Type, optional The data type of the placeholder Returns ------- Tensor Examples", "iv in lambda_ivs: if iv.var.name[0] == \"_\": indices.append(ret_ivs[rid]) rid += 1 else: indices.append(iv)", "compute(shape, fcompute, name=None, dtype=None, attrs=OrderedDict()): \"\"\"Construct a new tensor based on the shape", "get_index(shape, indices, 0) stmt = stage.pop_stmt() stmt = ReplaceReturn(buffer_var, dtype, index).mutate(stmt) stmt =", "0) elif isinstance(ret, Tensor): # reduction ret_ivs = [_IterVar((0, ret.shape[i]), ret.name+\"_i\" + str(i),", "of dimensions\") return args, len(shape) def compute_body(name, lambda_ivs, fcompute, shape=(), dtype=None, tensor=None, attrs=OrderedDict()):", "a Store statement \"\"\" return _make.Store(self.buffer_var, _make.Cast(self.dtype, node.value), self.index) def process_fcompute(fcompute, shape): \"\"\"Pre-process", "even imperative DSL. Parameters ---------- shape : tuple The shape of the returned", "a new tensor tensor = compute_body(name, lambda_ivs, fcompute, shape, dtype, attrs=attrs) return tensor", "_make.For(stmt.loop_var, stmt.min, stmt.extent, 0, 0, stmt.body, list(attrs.keys()), list(attrs.values())) stage.emit(stmt) stage.axis_list = indices +", "0, 0, stmt.body, list(attrs.keys()), list(attrs.values())) stage.emit(stmt) stage.axis_list = indices + stage.axis_list if return_tensor:", "the returned tensor, which must be callable. The number of arguments should match", "API. 
\"\"\" # check API correctness if not callable(fcompute): raise APIError(\"The construction rule", "be callable\") # prepare the iteration variables args = [] # list of", "_stmt from tvm.tir import IterVar as _IterVar from util import get_index, get_name, make_for,", "arguments exceeds the number of dimensions\") return args, len(shape) def compute_body(name, lambda_ivs, fcompute,", "A = hcl.compute((10, 10), lambda x, y: x+y) # equivalent code for x", "make_for(indices, stage.pop_stmt(), 0) elif isinstance(ret, Tensor): # reduction ret_ivs = [_IterVar((0, ret.shape[i]), ret.name+\"_i\"", "APIError(\"The shape of compute API must be a tuple\") # properties for the", "= fcompute.__code__.co_argcount # automatically create argument names if nargs < len(shape): for i", "A = hcl.placeholder((10, 10)) B = hcl.placeholder((10, 10, 10)) tA = compute_tanh(A) tB", "[] indices = [] rid = 0 for iv in lambda_ivs: if iv.var.name[0]", "compute_tanh(X): return hcl.compute(X.shape, lambda *args: hcl.tanh(X[args])) A = hcl.placeholder((10, 10)) B = hcl.placeholder((10,", "indices, 0) stage.emit(_make.Store(buffer_var, _make.Cast(dtype, ret), index)) stmt = make_for(indices, stage.pop_stmt(), 0) elif isinstance(ret,", "tensor. It can contain other HeteroCL APIs, even imperative DSL. Parameters ---------- shape", "imperative function definition @hcl.def_([(), ()]) def addition(x, y): hcl.return_(x+y) A = hcl.compute((10, 10),", "stmt = make_for(indices, stage.pop_stmt(), 0) elif isinstance(ret, Tensor): # reduction ret_ivs = [_IterVar((0,", "hcl.compute((10, 10), addition) # example 1.3 - imperative function definition @hcl.def_([(), ()]) def", "the iteration domain dtype : Type, optional The data type of the output/updated", "the compute function. The API **returns a new tensor**. 
The shape must be", "def mutate_Return(self, node): \"\"\"Replace the Return statement with a Store statement \"\"\" return", "of the Store statement index : Expr The index of the Store statement", "Stage(name, dtype, shape) as stage: if not return_tensor: stage.input_stages.add(tensor.last_update) else: tensor = Tensor(shape,", "need to replace the Return statement inside. \"\"\" #pylint: disable=no-self-use return node def", "Return statement inside. \"\"\" #pylint: disable=no-self-use return node def mutate_Return(self, node): \"\"\"Replace the", "print(dir(ret.b)) stage.lhs_tensors.add(tensor) for t in stage.lhs_tensors: t.last_update = stage stmt = None if", "the KernelDef statement We do not need to replace the Return statement inside.", "x+y A = hcl.compute((10, 10), addition) # example 1.3 - imperative function definition", "the fcompute field of an API. \"\"\" # check API correctness if not", "of IterVar A list contains the iteration variables in the lambda function if", "code for x in range(0, 10): for y in range(0, 10): A[x][y] =", "ret.name+\"_i\" + str(i), 0) for i in range(0, len(ret.shape))] non_reduce_ivs = [] indices", "range(0, len(ret.shape))] non_reduce_ivs = [] indices = [] rid = 0 for iv", "= stage._op return tensor return None ############################################################################## # APIs exposed to users ##############################################################################", "mutate_KerenlDef(self, node): \"\"\"Omit the KernelDef statement We do not need to replace the", "attrs=OrderedDict()): \"\"\"Construct a new tensor based on the shape and the compute function.", "field of an API. 
\"\"\" # check API correctness if not callable(fcompute): raise", "str(i), 0) for i in range(0, len(ret.shape))] non_reduce_ivs = [] indices = []", "arguments' names nargs = 0 # number of arguments if isinstance(fcompute, Module): args", "Scalar, Tensor, TensorSlice from schedule import Stage from debug import APIError from module", "dtype, shape) as stage: if not return_tensor: stage.input_stages.add(tensor.last_update) else: tensor = Tensor(shape, stage._dtype,", "[] rid = 0 for iv in lambda_ivs: if iv.var.name[0] == \"_\": indices.append(ret_ivs[rid])", "0)) stmt = stage.pop_stmt() stage.input_stages.remove(stage) if non_reduce_ivs: stmt = make_for(non_reduce_ivs, stmt, 0) else:", "n in range(0, nargs)] # call the helper function that returns a new", "t.last_update = stage stmt = None if ret is None: # replace all", "_expr.Expr, numbers.Number)): indices = lambda_ivs index, _, _ = get_index(shape, indices, 0) stage.emit(_make.Store(buffer_var,", "dtype self.index = index def mutate_KerenlDef(self, node): \"\"\"Omit the KernelDef statement We do", "rule shape : tuple, optional The output shape or the iteration domain dtype", "# reduction ret_ivs = [_IterVar((0, ret.shape[i]), ret.name+\"_i\" + str(i), 0) for i in", "lambda_ivs = [_IterVar((0, shape[n]), args[n], 0) for n in range(0, nargs)] # call", "defines the construction rule of the returned tensor, which must be callable. The", "for y in range(0, 10): A[x][y] = x + y # example 1.2", "the number of dimensions\") return args, len(shape) def compute_body(name, lambda_ivs, fcompute, shape=(), dtype=None,", "get_name, make_for, CastRemover from tensor import Scalar, Tensor, TensorSlice from schedule import Stage", "users more programming flexibility. 
The compute function specifies how we calculate each element", "ret = fcompute(*var_list) print(dir(ret)) print(dir(ret.a)) print(dir(ret.b)) stage.lhs_tensors.add(tensor) for t in stage.lhs_tensors: t.last_update =", "call the helper function that returns a new tensor tensor = compute_body(name, lambda_ivs,", "tensor based on the shape and the compute function. The API **returns a", "# example 1.3 - imperative function definition @hcl.def_([(), ()]) def addition(x, y): hcl.return_(x+y)", "be a tuple\") # properties for the returned tensor shape = CastRemover().mutate(shape) name", "return node def mutate_Return(self, node): \"\"\"Replace the Return statement with a Store statement", "raise APIError(\"The number of arguments exceeds the number of dimensions\") return args, len(shape)", "[_IterVar((0, ret.shape[i]), ret.name+\"_i\" + str(i), 0) for i in range(0, len(ret.shape))] non_reduce_ivs =", "tensor=None, attrs=OrderedDict()): \"\"\"Create a stage and perform the computation. If `tensor` is `None`,", "disable=no-self-use return node def mutate_Return(self, node): \"\"\"Replace the Return statement with a Store", "and perform the computation. If `tensor` is `None`, no tensor is returned. 
Parameters", "tensor._tensor = stage._op return tensor return None ############################################################################## # APIs exposed to users", "not return_tensor: stage.input_stages.add(tensor.last_update) else: tensor = Tensor(shape, stage._dtype, name, stage._buf) buffer_var = tensor._buf.data", "node def mutate_Return(self, node): \"\"\"Replace the Return statement with a Store statement \"\"\"", "= [i.var for i in lambda_ivs] return_tensor = True if tensor is None", "from tensor import Scalar, Tensor, TensorSlice from schedule import Stage from debug import", "redefined-builtin, too-many-arguments, missing-docstring import numbers from collections import OrderedDict from tvm import expr_hcl", "with Stage(name, dtype, shape) as stage: if not return_tensor: stage.input_stages.add(tensor.last_update) else: tensor =", "tensor : Tensor, optional The tensor to be updated. Create a new one", "buffer_var = tensor._buf.data dtype = tensor.dtype shape = tensor.shape stage.stmt_stack.append([]) ret = fcompute(*var_list)", "is returned. Parameters ---------- name : str The name of the stage lambda_ivs", "If `tensor` is `None`, no tensor is returned. Parameters ---------- name : str", "The computation rule shape : tuple, optional The output shape or the iteration", "type of the placeholder Returns ------- Tensor Examples -------- .. 
code-block:: python #", "from schedule import Stage from debug import APIError from module import Module ##############################################################################", "nargs = fcompute.__code__.co_argcount # automatically create argument names if nargs < len(shape): for", "ReplaceReturn(buffer_var, dtype, index).mutate(stmt) stmt = make_for(indices, stmt, 0) elif isinstance(ret, (TensorSlice, Scalar, _expr.Expr,", "= make_for(indices, stage.pop_stmt(), 0) elif isinstance(ret, Tensor): # reduction ret_ivs = [_IterVar((0, ret.shape[i]),", "iv.var.name[0] == \"_\": indices.append(ret_ivs[rid]) rid += 1 else: indices.append(iv) non_reduce_ivs.append(iv) if rid !=", "index of the Store statement \"\"\" def __init__(self, buffer_var, dtype, index): self.buffer_var =", "tensor dtype : Type, optional The data type of the placeholder Returns -------", "explicit function def addition(x, y): return x+y A = hcl.compute((10, 10), addition) #", "not callable(fcompute): raise APIError(\"The construction rule must be callable\") # prepare the iteration", "= _make.For(stmt.loop_var, stmt.min, stmt.extent, 0, 0, stmt.body, list(attrs.keys()), list(attrs.values())) stage.emit(stmt) stage.axis_list = indices", "buffer_var, dtype, index): self.buffer_var = buffer_var self.dtype = dtype self.index = index def", "fcompute : callable The computation rule shape : tuple, optional The output shape", "of arguments if isinstance(fcompute, Module): args = fcompute.arg_names nargs = len(args) else: args", "dtype = tensor.dtype shape = tensor.shape stage.stmt_stack.append([]) ret = fcompute(*var_list) print(dir(ret)) print(dir(ret.a)) print(dir(ret.b))", "index)) stmt = make_for(indices, stage.pop_stmt(), 0) elif isinstance(ret, Tensor): # reduction ret_ivs =", "The output shape or the iteration domain dtype : Type, optional The data", "= hcl.placeholder((10, 10, 10)) tA = compute_tanh(A) tB = compute_tanh(B) # example 3", "hcl.placeholder((10, 10, 10)) tA = compute_tanh(A) tB 
= compute_tanh(B) # example 3 -", "replace all hcl.return_ with Store stmt indices = lambda_ivs index, _, _ =", "_expr, stmt as _stmt from tvm.tir import IterVar as _IterVar from util import", "arguments if isinstance(fcompute, Module): args = fcompute.arg_names nargs = len(args) else: args =", "args = fcompute.arg_names nargs = len(args) else: args = list(fcompute.__code__.co_varnames) nargs = fcompute.__code__.co_argcount", "in stage.lhs_tensors: t.last_update = stage stmt = None if ret is None: #", "= 0 # number of arguments if isinstance(fcompute, Module): args = fcompute.arg_names nargs", "The name of the stage lambda_ivs : list of IterVar A list contains", "tensor**. The shape must be a tuple. The number of elements in the", "# call the helper function that returns a new tensor tensor = compute_body(name,", "attributes to the loop if isinstance(stmt, _stmt.For): stmt = _make.For(stmt.loop_var, stmt.min, stmt.extent, 0,", "= make_for(indices, stmt, 0) elif isinstance(ret, (TensorSlice, Scalar, _expr.Expr, numbers.Number)): indices = lambda_ivs", "10), addition) # example 2 - undetermined arguments def compute_tanh(X): return hcl.compute(X.shape, lambda", "match the dimension defined by `shape`, which *we do not check*. This, however,", "return type of the computation rule\") # add attributes to the loop if", "dimension defined by `shape`, which *we do not check*. This, however, provides users", "_, _ = get_index(shape, indices, 0) stage.emit(_make.Store(buffer_var, _make.Cast(dtype, ret), index)) stmt = make_for(indices,", "specifies how we calculate each element of the returned tensor. It can contain", "returned tensor shape = CastRemover().mutate(shape) name = get_name(\"compute\", name) # prepare the iteration", "contain other HeteroCL APIs, even imperative DSL. 
Parameters ---------- shape : tuple The", "+ y # example 1.2 - explicit function def addition(x, y): return x+y", "module import Module ############################################################################## # Helper classes and functions ############################################################################## class ReplaceReturn(CastRemover): \"\"\"Replace", "properties for the returned tensor shape = CastRemover().mutate(shape) name = get_name(\"compute\", name) #", "stmt.min, stmt.extent, 0, 0, stmt.body, list(attrs.keys()), list(attrs.values())) stage.emit(stmt) stage.axis_list = indices + stage.axis_list", "a new tensor based on the shape and the compute function. The API", "raise APIError(\"The construction rule must be callable\") # prepare the iteration variables args", "in lambda_ivs: if iv.var.name[0] == \"_\": indices.append(ret_ivs[rid]) rid += 1 else: indices.append(iv) non_reduce_ivs.append(iv)", "too-many-arguments, missing-docstring import numbers from collections import OrderedDict from tvm import expr_hcl as", "Tensor, TensorSlice from schedule import Stage from debug import APIError from module import", "is `None` Returns ------- Tensor or None \"\"\" var_list = [i.var for i", "create argument names if nargs < len(shape): for i in range(nargs, len(shape)): args.append(\"args\"", "This, however, provides users more programming flexibility. The compute function specifies how we", "= [] rid = 0 for iv in lambda_ivs: if iv.var.name[0] == \"_\":", "+= 1 else: indices.append(iv) non_reduce_ivs.append(iv) if rid != len(ret.shape): raise APIError(\"Incorrect number of", "The data type of the output/updated tensor tensor : Tensor, optional The tensor", "APIs, even imperative DSL. Parameters ---------- shape : tuple The shape of the", "the returned tensor. 
The second field `fcompute` defines the construction rule of the", "A = hcl.compute((10, 10), addition) # example 1.3 - imperative function definition @hcl.def_([(),", "isinstance(shape, tuple): raise APIError(\"The shape of compute API must be a tuple\") #", "The shape must be a tuple. The number of elements in the tuple", "len(ret.shape))] non_reduce_ivs = [] indices = [] rid = 0 for iv in", "Examples -------- .. code-block:: python # example 1.1 - anonymous lambda function A", "in range(0, 10): A[x][y] = x + y # example 1.2 - explicit", "= fcompute.arg_names nargs = len(args) else: args = list(fcompute.__code__.co_varnames) nargs = fcompute.__code__.co_argcount #", "argument names if nargs < len(shape): for i in range(nargs, len(shape)): args.append(\"args\" +", "raise APIError(\"Unknown return type of the computation rule\") # add attributes to the", "which *we do not check*. This, however, provides users more programming flexibility. The", "data type of the output/updated tensor tensor : Tensor, optional The tensor to", "index, _, _ = get_index(shape, indices, 0) st = _make.Store(buffer_var, _make.Cast(dtype, ret[tuple(ret_ivs)]), index)", "return None ############################################################################## # APIs exposed to users ############################################################################## def compute(shape, fcompute, name=None,", "returned tensor. It can contain other HeteroCL APIs, even imperative DSL. Parameters ----------", "10): A[x][y] = x + y # example 1.2 - explicit function def", "self.dtype = dtype self.index = index def mutate_KerenlDef(self, node): \"\"\"Omit the KernelDef statement", "-------- .. 
code-block:: python # example 1.1 - anonymous lambda function A =", "optional The output shape or the iteration domain dtype : Type, optional The", "node): \"\"\"Replace the Return statement with a Store statement \"\"\" return _make.Store(self.buffer_var, _make.Cast(self.dtype,", "addition) # example 2 - undetermined arguments def compute_tanh(X): return hcl.compute(X.shape, lambda *args:", "= process_fcompute(fcompute, shape) lambda_ivs = [_IterVar((0, shape[n]), args[n], 0) for n in range(0,", "if not return_tensor: stage.input_stages.add(tensor.last_update) else: tensor = Tensor(shape, stage._dtype, name, stage._buf) buffer_var =", "non_reduce_ivs: stmt = make_for(non_reduce_ivs, stmt, 0) else: raise APIError(\"Unknown return type of the", "prepare the iteration variables args, nargs = process_fcompute(fcompute, shape) lambda_ivs = [_IterVar((0, shape[n]),", "# replace all hcl.return_ with Store stmt indices = lambda_ivs index, _, _", "index, _, _ = get_index(shape, indices, 0) stmt = stage.pop_stmt() stmt = ReplaceReturn(buffer_var,", "in lambda arguments\") index, _, _ = get_index(shape, indices, 0) st = _make.Store(buffer_var,", "example 1.3 - imperative function definition @hcl.def_([(), ()]) def addition(x, y): hcl.return_(x+y) A", "10), addition) # example 1.3 - imperative function definition @hcl.def_([(), ()]) def addition(x,", "variables args, nargs = process_fcompute(fcompute, shape) lambda_ivs = [_IterVar((0, shape[n]), args[n], 0) for", "index) stage.emit(make_for(ret_ivs, st, 0)) stmt = stage.pop_stmt() stage.input_stages.remove(stage) if non_reduce_ivs: stmt = make_for(non_reduce_ivs,", "The data type of the Store statement index : Expr The index of", "+ str(i)) elif nargs > len(shape): raise APIError(\"The number of arguments exceeds the", "list of IterVar A list contains the iteration variables in the lambda function", "if exists fcompute : callable The computation rule shape : tuple, optional The", "from collections import OrderedDict from tvm 
import expr_hcl as _expr, stmt as _stmt", "st = _make.Store(buffer_var, _make.Cast(dtype, ret[tuple(ret_ivs)]), index) stage.emit(make_for(ret_ivs, st, 0)) stmt = stage.pop_stmt() stage.input_stages.remove(stage)", "()]) def addition(x, y): hcl.return_(x+y) A = hcl.compute((10, 10), addition) # example 2", "stage.stmt_stack.append([]) ret = fcompute(*var_list) print(dir(ret)) print(dir(ret.a)) print(dir(ret.b)) stage.lhs_tensors.add(tensor) for t in stage.lhs_tensors: t.last_update", "- mixed-paradigm programming def return_max(x, y): with hcl.if_(x > y): hcl.return_(x) with hcl.else_:", "= tensor._buf.data dtype = tensor.dtype shape = tensor.shape stage.stmt_stack.append([]) ret = fcompute(*var_list) print(dir(ret))", "returned tensor name : str, optional The name of the returned tensor dtype", "with hcl.else_: hcl.return_(y) A = hcl.compute((10, 10), return_max) \"\"\" # check API correctness", "statement We do not need to replace the Return statement inside. \"\"\" #pylint:", "tuple. The number of elements in the tuple decides the dimension of the", "of the output/updated tensor tensor : Tensor, optional The tensor to be updated.", "*we do not check*. This, however, provides users more programming flexibility. The compute", "disable=no-member, redefined-builtin, too-many-arguments, missing-docstring import numbers from collections import OrderedDict from tvm import", "of the placeholder Returns ------- Tensor Examples -------- .. code-block:: python # example", "iteration variables in the lambda function if exists fcompute : callable The computation", "rule must be callable\") # prepare the iteration variables args = [] #", "reduction axes in lambda arguments\") index, _, _ = get_index(shape, indices, 0) st", "the returned tensor. It can contain other HeteroCL APIs, even imperative DSL. 
Parameters", "if return_tensor: tensor._tensor = stage._op return tensor return None ############################################################################## # APIs exposed", "IterVar A list contains the iteration variables in the lambda function if exists", "on the shape and the compute function. The API **returns a new tensor**.", "elif isinstance(ret, Tensor): # reduction ret_ivs = [_IterVar((0, ret.shape[i]), ret.name+\"_i\" + str(i), 0)", ": Type The data type of the Store statement index : Expr The", "indices = lambda_ivs index, _, _ = get_index(shape, indices, 0) stmt = stage.pop_stmt()", "to the loop if isinstance(stmt, _stmt.For): stmt = _make.For(stmt.loop_var, stmt.min, stmt.extent, 0, 0,", "shape must be a tuple. The number of elements in the tuple decides", "the iteration variables args, nargs = process_fcompute(fcompute, shape) lambda_ivs = [_IterVar((0, shape[n]), args[n],", "data type of the Store statement index : Expr The index of the", "a Store statement. Attributes ---------- buffer_var : Var The buffer variable of the", "lambda_ivs: if iv.var.name[0] == \"_\": indices.append(ret_ivs[rid]) rid += 1 else: indices.append(iv) non_reduce_ivs.append(iv) if", "tensor.shape stage.stmt_stack.append([]) ret = fcompute(*var_list) print(dir(ret)) print(dir(ret.a)) print(dir(ret.b)) stage.lhs_tensors.add(tensor) for t in stage.lhs_tensors:", "tuple\") # properties for the returned tensor shape = CastRemover().mutate(shape) name = get_name(\"compute\",", "tensor._buf.data dtype = tensor.dtype shape = tensor.shape stage.stmt_stack.append([]) ret = fcompute(*var_list) print(dir(ret)) print(dir(ret.a))", "stage.axis_list if return_tensor: tensor._tensor = stage._op return tensor return None ############################################################################## # APIs", "can contain other HeteroCL APIs, even imperative DSL. 
Parameters ---------- shape : tuple", "Store statement dtype : Type The data type of the Store statement index", "tB = compute_tanh(B) # example 3 - mixed-paradigm programming def return_max(x, y): with", "a tuple\") # properties for the returned tensor shape = CastRemover().mutate(shape) name =", "APIError(\"The construction rule must be callable\") # prepare the iteration variables args =", "_, _ = get_index(shape, indices, 0) stmt = stage.pop_stmt() stmt = ReplaceReturn(buffer_var, dtype,", "as _expr, stmt as _stmt from tvm.tir import IterVar as _IterVar from util", "automatically create argument names if nargs < len(shape): for i in range(nargs, len(shape)):", "name of the stage lambda_ivs : list of IterVar A list contains the", "not check*. This, however, provides users more programming flexibility. The compute function specifies", "of arguments' names nargs = 0 # number of arguments if isinstance(fcompute, Module):", "hcl.return_(y) A = hcl.compute((10, 10), return_max) \"\"\" # check API correctness if not", "imperative DSL. Parameters ---------- shape : tuple The shape of the returned tensor", "args = list(fcompute.__code__.co_varnames) nargs = fcompute.__code__.co_argcount # automatically create argument names if nargs", "x + y # example 1.2 - explicit function def addition(x, y): return", "node): \"\"\"Omit the KernelDef statement We do not need to replace the Return", "Module ############################################################################## # Helper classes and functions ############################################################################## class ReplaceReturn(CastRemover): \"\"\"Replace all Return" ]
[ "if stunde < 0: raise StundeZuKlein(stunde) if stunde > 60: raise StundeZuGross(stunde) self.__stunde", "None self.stunde = stunde pass def __repr__(self): return f\"<Stunde {self.__str__()}>\" def __str__(self): return", "VergleichsStammZahl from Kronos_heureka_code.Zeit.Uhrzeit.Stunde.StundeException import * class Stunde(VergleichsStammZahl): def __init__(self, stunde: int): self.__stunde: [int,", "@stunde.setter def stunde(self, stunde: int): if type(stunde) != int: raise StundeKeineGanzeZahl(stunde) if stunde", "stunde pass def __repr__(self): return f\"<Stunde {self.__str__()}>\" def __str__(self): return str(self.stunde).zfill(2) if self.stunde", "\"00\" def __int__(self): return self.stunde @property def stunde(self): return self.__stunde @stunde.setter def stunde(self,", "raise StundeKeineGanzeZahl(stunde) if stunde < 0: raise StundeZuKlein(stunde) if stunde > 60: raise", "0: raise StundeZuKlein(stunde) if stunde > 60: raise StundeZuGross(stunde) self.__stunde = stunde pass", "__init__(self, stunde: int): self.__stunde: [int, None] = None self.stunde = stunde pass def", "= stunde pass def __repr__(self): return f\"<Stunde {self.__str__()}>\" def __str__(self): return str(self.stunde).zfill(2) if", "return self.__stunde @stunde.setter def stunde(self, stunde: int): if type(stunde) != int: raise StundeKeineGanzeZahl(stunde)", "stunde: int): if type(stunde) != int: raise StundeKeineGanzeZahl(stunde) if stunde < 0: raise", "type(stunde) != int: raise StundeKeineGanzeZahl(stunde) if stunde < 0: raise StundeZuKlein(stunde) if stunde", "def stunde(self, stunde: int): if type(stunde) != int: raise StundeKeineGanzeZahl(stunde) if stunde <", "int: raise StundeKeineGanzeZahl(stunde) if stunde < 0: raise StundeZuKlein(stunde) if stunde > 60:", "f\"<Stunde {self.__str__()}>\" def __str__(self): return str(self.stunde).zfill(2) if self.stunde else \"00\" def __int__(self): return", "int): if type(stunde) != int: raise StundeKeineGanzeZahl(stunde) if stunde < 0: raise 
StundeZuKlein(stunde)", "from Kronos_heureka_code.Zeit.Uhrzeit.Stunde.StundeException import * class Stunde(VergleichsStammZahl): def __init__(self, stunde: int): self.__stunde: [int, None]", "@property def stunde(self): return self.__stunde @stunde.setter def stunde(self, stunde: int): if type(stunde) !=", "* class Stunde(VergleichsStammZahl): def __init__(self, stunde: int): self.__stunde: [int, None] = None self.stunde", "return self.stunde @property def stunde(self): return self.__stunde @stunde.setter def stunde(self, stunde: int): if", "self.__stunde @stunde.setter def stunde(self, stunde: int): if type(stunde) != int: raise StundeKeineGanzeZahl(stunde) if", "raise StundeZuKlein(stunde) if stunde > 60: raise StundeZuGross(stunde) self.__stunde = stunde pass pass", "str(self.stunde).zfill(2) if self.stunde else \"00\" def __int__(self): return self.stunde @property def stunde(self): return", "Kronos_heureka_code.Zeit.Uhrzeit.Stunde.StundeException import * class Stunde(VergleichsStammZahl): def __init__(self, stunde: int): self.__stunde: [int, None] =", "__repr__(self): return f\"<Stunde {self.__str__()}>\" def __str__(self): return str(self.stunde).zfill(2) if self.stunde else \"00\" def", "self.stunde @property def stunde(self): return self.__stunde @stunde.setter def stunde(self, stunde: int): if type(stunde)", "Stunde(VergleichsStammZahl): def __init__(self, stunde: int): self.__stunde: [int, None] = None self.stunde = stunde", "self.__stunde: [int, None] = None self.stunde = stunde pass def __repr__(self): return f\"<Stunde", "stunde(self, stunde: int): if type(stunde) != int: raise StundeKeineGanzeZahl(stunde) if stunde < 0:", "def stunde(self): return self.__stunde @stunde.setter def stunde(self, stunde: int): if type(stunde) != int:", "def __init__(self, stunde: int): self.__stunde: [int, None] = None self.stunde = stunde pass", "def __repr__(self): return f\"<Stunde {self.__str__()}>\" def __str__(self): return str(self.stunde).zfill(2) if self.stunde 
else \"00\"", "return str(self.stunde).zfill(2) if self.stunde else \"00\" def __int__(self): return self.stunde @property def stunde(self):", "< 0: raise StundeZuKlein(stunde) if stunde > 60: raise StundeZuGross(stunde) self.__stunde = stunde", "None] = None self.stunde = stunde pass def __repr__(self): return f\"<Stunde {self.__str__()}>\" def", "if type(stunde) != int: raise StundeKeineGanzeZahl(stunde) if stunde < 0: raise StundeZuKlein(stunde) if", "= None self.stunde = stunde pass def __repr__(self): return f\"<Stunde {self.__str__()}>\" def __str__(self):", "pass def __repr__(self): return f\"<Stunde {self.__str__()}>\" def __str__(self): return str(self.stunde).zfill(2) if self.stunde else", "stunde: int): self.__stunde: [int, None] = None self.stunde = stunde pass def __repr__(self):", "{self.__str__()}>\" def __str__(self): return str(self.stunde).zfill(2) if self.stunde else \"00\" def __int__(self): return self.stunde", "!= int: raise StundeKeineGanzeZahl(stunde) if stunde < 0: raise StundeZuKlein(stunde) if stunde >", "else \"00\" def __int__(self): return self.stunde @property def stunde(self): return self.__stunde @stunde.setter def", "Kronos_heureka_code.__VergleichsStamm import VergleichsStammZahl from Kronos_heureka_code.Zeit.Uhrzeit.Stunde.StundeException import * class Stunde(VergleichsStammZahl): def __init__(self, stunde: int):", "import VergleichsStammZahl from Kronos_heureka_code.Zeit.Uhrzeit.Stunde.StundeException import * class Stunde(VergleichsStammZahl): def __init__(self, stunde: int): self.__stunde:", "class Stunde(VergleichsStammZahl): def __init__(self, stunde: int): self.__stunde: [int, None] = None self.stunde =", "import * class Stunde(VergleichsStammZahl): def __init__(self, stunde: int): self.__stunde: [int, None] = None", "if self.stunde else \"00\" def __int__(self): return self.stunde @property def stunde(self): return self.__stunde", "self.stunde = stunde pass def __repr__(self): return f\"<Stunde {self.__str__()}>\" def 
__str__(self): return str(self.stunde).zfill(2)", "[int, None] = None self.stunde = stunde pass def __repr__(self): return f\"<Stunde {self.__str__()}>\"", "def __str__(self): return str(self.stunde).zfill(2) if self.stunde else \"00\" def __int__(self): return self.stunde @property", "return f\"<Stunde {self.__str__()}>\" def __str__(self): return str(self.stunde).zfill(2) if self.stunde else \"00\" def __int__(self):", "__str__(self): return str(self.stunde).zfill(2) if self.stunde else \"00\" def __int__(self): return self.stunde @property def", "def __int__(self): return self.stunde @property def stunde(self): return self.__stunde @stunde.setter def stunde(self, stunde:", "StundeKeineGanzeZahl(stunde) if stunde < 0: raise StundeZuKlein(stunde) if stunde > 60: raise StundeZuGross(stunde)", "stunde < 0: raise StundeZuKlein(stunde) if stunde > 60: raise StundeZuGross(stunde) self.__stunde =", "__int__(self): return self.stunde @property def stunde(self): return self.__stunde @stunde.setter def stunde(self, stunde: int):", "int): self.__stunde: [int, None] = None self.stunde = stunde pass def __repr__(self): return", "self.stunde else \"00\" def __int__(self): return self.stunde @property def stunde(self): return self.__stunde @stunde.setter", "stunde(self): return self.__stunde @stunde.setter def stunde(self, stunde: int): if type(stunde) != int: raise", "from Kronos_heureka_code.__VergleichsStamm import VergleichsStammZahl from Kronos_heureka_code.Zeit.Uhrzeit.Stunde.StundeException import * class Stunde(VergleichsStammZahl): def __init__(self, stunde:" ]
[ "-*- from django.contrib.auth.backends import ModelBackend from django.contrib.auth.models import User class DKSSOBlindTrustAuthenticator(ModelBackend): def authenticate(self,", "utf-8 -*- from django.contrib.auth.backends import ModelBackend from django.contrib.auth.models import User class DKSSOBlindTrustAuthenticator(ModelBackend): def", "User class DKSSOBlindTrustAuthenticator(ModelBackend): def authenticate(self, username=None, password=<PASSWORD>, **kw): if not kw.get('sso_login'): return None", "from django.contrib.auth.models import User class DKSSOBlindTrustAuthenticator(ModelBackend): def authenticate(self, username=None, password=<PASSWORD>, **kw): if not", "ModelBackend from django.contrib.auth.models import User class DKSSOBlindTrustAuthenticator(ModelBackend): def authenticate(self, username=None, password=<PASSWORD>, **kw): if", "import User class DKSSOBlindTrustAuthenticator(ModelBackend): def authenticate(self, username=None, password=<PASSWORD>, **kw): if not kw.get('sso_login'): return", "import ModelBackend from django.contrib.auth.models import User class DKSSOBlindTrustAuthenticator(ModelBackend): def authenticate(self, username=None, password=<PASSWORD>, **kw):", "django.contrib.auth.models import User class DKSSOBlindTrustAuthenticator(ModelBackend): def authenticate(self, username=None, password=<PASSWORD>, **kw): if not kw.get('sso_login'):", "class DKSSOBlindTrustAuthenticator(ModelBackend): def authenticate(self, username=None, password=<PASSWORD>, **kw): if not kw.get('sso_login'): return None return", "-*- coding: utf-8 -*- from django.contrib.auth.backends import ModelBackend from django.contrib.auth.models import User class", "# -*- coding: utf-8 -*- from django.contrib.auth.backends import ModelBackend from django.contrib.auth.models import User", "django.contrib.auth.backends import ModelBackend from django.contrib.auth.models import User class DKSSOBlindTrustAuthenticator(ModelBackend): def authenticate(self, username=None, 
password=<PASSWORD>,", "<gh_stars>0 # -*- coding: utf-8 -*- from django.contrib.auth.backends import ModelBackend from django.contrib.auth.models import", "DKSSOBlindTrustAuthenticator(ModelBackend): def authenticate(self, username=None, password=<PASSWORD>, **kw): if not kw.get('sso_login'): return None return User.objects.get(username=username)", "coding: utf-8 -*- from django.contrib.auth.backends import ModelBackend from django.contrib.auth.models import User class DKSSOBlindTrustAuthenticator(ModelBackend):", "from django.contrib.auth.backends import ModelBackend from django.contrib.auth.models import User class DKSSOBlindTrustAuthenticator(ModelBackend): def authenticate(self, username=None," ]
[ "absolute deviation from mean from feature distribution return np.mean(np.absolute(self.dataset[self.column] - np.mean(self.dataset[self.column]))) def absolute_deviation_from_median(self):", "factor from feature distribution return scipy.stats.skew(self.dataset[self.column]) def entropy(self): # call for entropy from", "variation from feature distribution return scipy.stats.variation(self.dataset[self.column]) def gini_coefficient(self): # call for gini coefficient", "discrete numerical' def average(self): # TODO: remove return np.average(self.dataset[self.column]) def expected_value(self): # call", "call for coefficient of variation from feature distribution return scipy.stats.variation(self.dataset[self.column]) def gini_coefficient(self): #", "median from feature distribution return np.median(self.dataset[self.column]) def mode(self): # call for mode from", "def gini_coefficient(self): # call for gini coefficient from feature distribution # TODO: refactorize", "# TODO: refactorize mad = np.abs(np.subtract.outer(self.dataset[self.column], self.dataset[self.column])).mean() rmad = mad/np.mean(self.dataset[self.column]) return 0.5 *", "covers the most general feature statistics used in data analysis. 
''' def __init__(self):", "asymmetry factor from feature distribution return scipy.stats.skew(self.dataset[self.column]) def entropy(self): # call for entropy", "fig.savefig('static/plot{}.png'.format(plot_number + 1), dpi=fig.dpi) # return fig # plt.show() def measurement(self): # call", "adjusted with accordance to display size sns.set_style(\"whitegrid\") fig, ax = plt.subplots() fig.set_size_inches(12, 12)", "-- quantitive discrete numerical if self.dataset[self.column].dtypes == 'float64': for value in self.dataset[self.column].values: if", "np.average(self.dataset[self.column]) def expected_value(self): # call for expected value from feature distribution return np.mean(self.dataset[self.column])", "as plt import seaborn as sns import scipy from statsmodels import robust class", ",25]) return (q75 - q25) def coefficient_of_variation(self): # call for coefficient of variation", "call for mode from feature distribution return scipy.stats.median_absolute_deviation(self.dataset[self.column]) def quarter_deviation(self): # call for", "return 'quantitive discrete categorical' else: return 'quantitive discrete numerical' def average(self): # TODO:", "standard_deviation(self): # call for standard deviation from feature distribution return np.std(self.dataset[self.column]) def absolute_deviation_from_mean(self):", "for quarter devaition from feature distribution q75, q25 = np.percentile(self.dataset[self.column], [75 ,25]) return", "for mode from feature distribution return scipy.stats.mode(self.dataset[self.column]) def standard_deviation(self): # call for standard", "as pd import numpy as np import matplotlib.pyplot as plt import seaborn as", "average(self): # TODO: remove return np.average(self.dataset[self.column]) def expected_value(self): # call for expected value", "def expected_value(self): # call for expected value from feature distribution return np.mean(self.dataset[self.column]) def", "data analysis. 
''' def __init__(self): # Handled by cursor in common.py file in", "float(value) != int(value): return 'quantitive continous' if len(pd.unique(self.dataset[self.column])) == 2: return 'quantitive discrete", "quarter_deviation(self): # call for quarter devaition from feature distribution q75, q25 = np.percentile(self.dataset[self.column],", "''' Display statistics from every numerical column in data set. Base class for", "def mode(self): # call for mode from feature distribution return scipy.stats.mode(self.dataset[self.column]) def standard_deviation(self):", "- np.mean(self.dataset[self.column]))) def absolute_deviation_from_median(self): # call for mode from feature distribution return scipy.stats.median_absolute_deviation(self.dataset[self.column])", "if len(pd.unique(self.dataset[self.column])) == 2: return 'quantitive discrete categorical' else: return 'quantitive discrete numerical'", "self.dataset[self.column].dtypes == 'float64': for value in self.dataset[self.column].values: if float(value) != int(value): return 'quantitive", "each histogram plot in the page. Class covers the most general feature statistics", "(after hoover), in each histogram plot in the page. Class covers the most", "quantitive continous # -- quantitive discrete categorical # -- quantitive discrete numerical if", "scipy.stats.mode(self.dataset[self.column]) def standard_deviation(self): # call for standard deviation from feature distribution return np.std(self.dataset[self.column])", "Mutual description instance. Outcomes are represented from the beggining (after hoover), in each", "ax=sns.distplot(self.dataset.iloc[:, [plot_number]], rug=True, color='k') fig.patch.set_alpha(0.0) plt.xticks(fontsize=25) plt.yticks(fontsize=25) fig.savefig('static/plot{}.png'.format(plot_number + 1), dpi=fig.dpi) # return", "numerical column in data set. Base class for Mutual description instance. Outcomes are", "data set. Base class for Mutual description instance. 
Outcomes are represented from the", "plot_number): # Generate histogram and save as a static file # size and", "Outcomes are represented from the beggining (after hoover), in each histogram plot in", "analysis. ''' def __init__(self): # Handled by cursor in common.py file in `Mutual_description`", "outcomes are: # -- quantitive continous # -- quantitive discrete categorical # --", "for standard deviation from feature distribution return np.std(self.dataset[self.column]) def absolute_deviation_from_mean(self): # call for", "statsmodels import robust class Singular_description(object): ''' Display statistics from every numerical column in", "accordance to display size sns.set_style(\"whitegrid\") fig, ax = plt.subplots() fig.set_size_inches(12, 12) ax=sns.distplot(self.dataset.iloc[:, [plot_number]],", "numerical if self.dataset[self.column].dtypes == 'float64': for value in self.dataset[self.column].values: if float(value) != int(value):", "2: return 'quantitive discrete categorical' else: return 'quantitive discrete numerical' def average(self): #", "for gini coefficient from feature distribution # TODO: refactorize mad = np.abs(np.subtract.outer(self.dataset[self.column], self.dataset[self.column])).mean()", "call for absolute deviation from mean from feature distribution return np.mean(np.absolute(self.dataset[self.column] - np.mean(self.dataset[self.column])))", "def quarter_deviation(self): # call for quarter devaition from feature distribution q75, q25 =", "feature distribution return scipy.stats.mode(self.dataset[self.column]) def standard_deviation(self): # call for standard deviation from feature", "def standard_deviation(self): # call for standard deviation from feature distribution return np.std(self.dataset[self.column]) def", "if float(value) != int(value): return 'quantitive continous' if len(pd.unique(self.dataset[self.column])) == 2: return 'quantitive", "'quantitive discrete categorical' else: return 'quantitive discrete numerical' def 
average(self): # TODO: remove", "return (q75 - q25) def coefficient_of_variation(self): # call for coefficient of variation from", "rmad def asymmetry_factor(self): # call for asymmetry factor from feature distribution return scipy.stats.skew(self.dataset[self.column])", "[plot_number]], rug=True, color='k') fig.patch.set_alpha(0.0) plt.xticks(fontsize=25) plt.yticks(fontsize=25) fig.savefig('static/plot{}.png'.format(plot_number + 1), dpi=fig.dpi) # return fig", "coefficient of variation from feature distribution return scipy.stats.variation(self.dataset[self.column]) def gini_coefficient(self): # call for", "robust class Singular_description(object): ''' Display statistics from every numerical column in data set.", "if self.dataset[self.column].dtypes == 'float64': for value in self.dataset[self.column].values: if float(value) != int(value): return", "np import matplotlib.pyplot as plt import seaborn as sns import scipy from statsmodels", "distribution return scipy.stats.skew(self.dataset[self.column]) def entropy(self): # call for entropy from feature distribution return", "remove return np.average(self.dataset[self.column]) def expected_value(self): # call for expected value from feature distribution", "# call for measurement category of the feature # possible outcomes are: #", "for measurement category of the feature # possible outcomes are: # -- quantitive", "from feature distribution return scipy.stats.skew(self.dataset[self.column]) def entropy(self): # call for entropy from feature", "# call for mode from feature distribution return scipy.stats.mode(self.dataset[self.column]) def standard_deviation(self): # call", "discrete numerical if self.dataset[self.column].dtypes == 'float64': for value in self.dataset[self.column].values: if float(value) !=", "color='k') fig.patch.set_alpha(0.0) plt.xticks(fontsize=25) plt.yticks(fontsize=25) fig.savefig('static/plot{}.png'.format(plot_number + 1), dpi=fig.dpi) # return fig # plt.show()", "\"\" def histogram(self, 
plot_number): # Generate histogram and save as a static file", "save as a static file # size and ticks are adjusted with accordance", "''' def __init__(self): # Handled by cursor in common.py file in `Mutual_description` self.column", "call for gini coefficient from feature distribution # TODO: refactorize mad = np.abs(np.subtract.outer(self.dataset[self.column],", "deviation from mean from feature distribution return np.mean(np.absolute(self.dataset[self.column] - np.mean(self.dataset[self.column]))) def absolute_deviation_from_median(self): #", "return 0.5 * rmad def asymmetry_factor(self): # call for asymmetry factor from feature", "def histogram(self, plot_number): # Generate histogram and save as a static file #", "the beggining (after hoover), in each histogram plot in the page. Class covers", "expected_value(self): # call for expected value from feature distribution return np.mean(self.dataset[self.column]) def median(self):", "continous' if len(pd.unique(self.dataset[self.column])) == 2: return 'quantitive discrete categorical' else: return 'quantitive discrete", "absolute_deviation_from_median(self): # call for mode from feature distribution return scipy.stats.median_absolute_deviation(self.dataset[self.column]) def quarter_deviation(self): #", "[75 ,25]) return (q75 - q25) def coefficient_of_variation(self): # call for coefficient of", "TODO: remove return np.average(self.dataset[self.column]) def expected_value(self): # call for expected value from feature", "distribution return scipy.stats.median_absolute_deviation(self.dataset[self.column]) def quarter_deviation(self): # call for quarter devaition from feature distribution", "quantitive discrete categorical # -- quantitive discrete numerical if self.dataset[self.column].dtypes == 'float64': for", "histogram(self, plot_number): # Generate histogram and save as a static file # size", "return scipy.stats.mode(self.dataset[self.column]) def standard_deviation(self): # call for standard deviation from 
feature distribution return", "feature statistics used in data analysis. ''' def __init__(self): # Handled by cursor", "# TODO: remove return np.average(self.dataset[self.column]) def expected_value(self): # call for expected value from", "import pandas as pd import numpy as np import matplotlib.pyplot as plt import", "median(self): # call for median from feature distribution return np.median(self.dataset[self.column]) def mode(self): #", "sns import scipy from statsmodels import robust class Singular_description(object): ''' Display statistics from", "return scipy.stats.median_absolute_deviation(self.dataset[self.column]) def quarter_deviation(self): # call for quarter devaition from feature distribution q75,", "self.dataset[self.column])).mean() rmad = mad/np.mean(self.dataset[self.column]) return 0.5 * rmad def asymmetry_factor(self): # call for", "'float64': for value in self.dataset[self.column].values: if float(value) != int(value): return 'quantitive continous' if", "# -- quantitive discrete numerical if self.dataset[self.column].dtypes == 'float64': for value in self.dataset[self.column].values:", "Singular_description(object): ''' Display statistics from every numerical column in data set. 
Base class", "of variation from feature distribution return scipy.stats.variation(self.dataset[self.column]) def gini_coefficient(self): # call for gini", "= mad/np.mean(self.dataset[self.column]) return 0.5 * rmad def asymmetry_factor(self): # call for asymmetry factor", "+ 1), dpi=fig.dpi) # return fig # plt.show() def measurement(self): # call for", "are: # -- quantitive continous # -- quantitive discrete categorical # -- quantitive", "np.median(self.dataset[self.column]) def mode(self): # call for mode from feature distribution return scipy.stats.mode(self.dataset[self.column]) def", "# call for mode from feature distribution return scipy.stats.median_absolute_deviation(self.dataset[self.column]) def quarter_deviation(self): # call", "feature distribution return np.median(self.dataset[self.column]) def mode(self): # call for mode from feature distribution", "return 'quantitive continous' if len(pd.unique(self.dataset[self.column])) == 2: return 'quantitive discrete categorical' else: return", "else: return 'quantitive discrete numerical' def average(self): # TODO: remove return np.average(self.dataset[self.column]) def", "feature distribution return scipy.stats.variation(self.dataset[self.column]) def gini_coefficient(self): # call for gini coefficient from feature", "description instance. Outcomes are represented from the beggining (after hoover), in each histogram", "in data analysis. 
''' def __init__(self): # Handled by cursor in common.py file", "def absolute_deviation_from_median(self): # call for mode from feature distribution return scipy.stats.median_absolute_deviation(self.dataset[self.column]) def quarter_deviation(self):", "gini_coefficient(self): # call for gini coefficient from feature distribution # TODO: refactorize mad", "ax = plt.subplots() fig.set_size_inches(12, 12) ax=sns.distplot(self.dataset.iloc[:, [plot_number]], rug=True, color='k') fig.patch.set_alpha(0.0) plt.xticks(fontsize=25) plt.yticks(fontsize=25) fig.savefig('static/plot{}.png'.format(plot_number", "category of the feature # possible outcomes are: # -- quantitive continous #", "- q25) def coefficient_of_variation(self): # call for coefficient of variation from feature distribution", "from feature distribution return np.mean(np.absolute(self.dataset[self.column] - np.mean(self.dataset[self.column]))) def absolute_deviation_from_median(self): # call for mode", "distribution return np.median(self.dataset[self.column]) def mode(self): # call for mode from feature distribution return", "the page. 
Class covers the most general feature statistics used in data analysis.", "return np.average(self.dataset[self.column]) def expected_value(self): # call for expected value from feature distribution return", "from feature distribution q75, q25 = np.percentile(self.dataset[self.column], [75 ,25]) return (q75 - q25)", "discrete categorical # -- quantitive discrete numerical if self.dataset[self.column].dtypes == 'float64': for value", "ticks are adjusted with accordance to display size sns.set_style(\"whitegrid\") fig, ax = plt.subplots()", "coefficient from feature distribution # TODO: refactorize mad = np.abs(np.subtract.outer(self.dataset[self.column], self.dataset[self.column])).mean() rmad =", "return fig # plt.show() def measurement(self): # call for measurement category of the", "distribution return np.mean(np.absolute(self.dataset[self.column] - np.mean(self.dataset[self.column]))) def absolute_deviation_from_median(self): # call for mode from feature", "numerical' def average(self): # TODO: remove return np.average(self.dataset[self.column]) def expected_value(self): # call for", "dpi=fig.dpi) # return fig # plt.show() def measurement(self): # call for measurement category", "# call for quarter devaition from feature distribution q75, q25 = np.percentile(self.dataset[self.column], [75", "def median(self): # call for median from feature distribution return np.median(self.dataset[self.column]) def mode(self):", "asymmetry_factor(self): # call for asymmetry factor from feature distribution return scipy.stats.skew(self.dataset[self.column]) def entropy(self):", "* rmad def asymmetry_factor(self): # call for asymmetry factor from feature distribution return", "import robust class Singular_description(object): ''' Display statistics from every numerical column in data", "quarter devaition from feature distribution q75, q25 = np.percentile(self.dataset[self.column], [75 ,25]) return (q75", "'quantitive continous' if len(pd.unique(self.dataset[self.column])) == 2: 
return 'quantitive discrete categorical' else: return 'quantitive", "plot in the page. Class covers the most general feature statistics used in", "for coefficient of variation from feature distribution return scipy.stats.variation(self.dataset[self.column]) def gini_coefficient(self): # call", "as a static file # size and ticks are adjusted with accordance to", "categorical # -- quantitive discrete numerical if self.dataset[self.column].dtypes == 'float64': for value in", "categorical' else: return 'quantitive discrete numerical' def average(self): # TODO: remove return np.average(self.dataset[self.column])", "== 2: return 'quantitive discrete categorical' else: return 'quantitive discrete numerical' def average(self):", "== 'float64': for value in self.dataset[self.column].values: if float(value) != int(value): return 'quantitive continous'", "from feature distribution return np.std(self.dataset[self.column]) def absolute_deviation_from_mean(self): # call for absolute deviation from", "the feature # possible outcomes are: # -- quantitive continous # -- quantitive", "devaition from feature distribution q75, q25 = np.percentile(self.dataset[self.column], [75 ,25]) return (q75 -", "return scipy.stats.variation(self.dataset[self.column]) def gini_coefficient(self): # call for gini coefficient from feature distribution #", "0.5 * rmad def asymmetry_factor(self): # call for asymmetry factor from feature distribution", "fig.set_size_inches(12, 12) ax=sns.distplot(self.dataset.iloc[:, [plot_number]], rug=True, color='k') fig.patch.set_alpha(0.0) plt.xticks(fontsize=25) plt.yticks(fontsize=25) fig.savefig('static/plot{}.png'.format(plot_number + 1), dpi=fig.dpi)", "feature distribution return scipy.stats.median_absolute_deviation(self.dataset[self.column]) def quarter_deviation(self): # call for quarter devaition from feature", "file # size and ticks are adjusted with accordance to display size sns.set_style(\"whitegrid\")", "# call for absolute deviation from mean from 
feature distribution return np.mean(np.absolute(self.dataset[self.column] -", "the most general feature statistics used in data analysis. ''' def __init__(self): #", "distribution return scipy.stats.variation(self.dataset[self.column]) def gini_coefficient(self): # call for gini coefficient from feature distribution", "to display size sns.set_style(\"whitegrid\") fig, ax = plt.subplots() fig.set_size_inches(12, 12) ax=sns.distplot(self.dataset.iloc[:, [plot_number]], rug=True,", "as np import matplotlib.pyplot as plt import seaborn as sns import scipy from", "!= int(value): return 'quantitive continous' if len(pd.unique(self.dataset[self.column])) == 2: return 'quantitive discrete categorical'", "in self.dataset[self.column].values: if float(value) != int(value): return 'quantitive continous' if len(pd.unique(self.dataset[self.column])) == 2:", "for asymmetry factor from feature distribution return scipy.stats.skew(self.dataset[self.column]) def entropy(self): # call for", "distribution return scipy.stats.mode(self.dataset[self.column]) def standard_deviation(self): # call for standard deviation from feature distribution", "rmad = mad/np.mean(self.dataset[self.column]) return 0.5 * rmad def asymmetry_factor(self): # call for asymmetry", "statistics from every numerical column in data set. Base class for Mutual description", "common.py file in `Mutual_description` self.column = \"\" def histogram(self, plot_number): # Generate histogram", "import numpy as np import matplotlib.pyplot as plt import seaborn as sns import", "int(value): return 'quantitive continous' if len(pd.unique(self.dataset[self.column])) == 2: return 'quantitive discrete categorical' else:", "column in data set. Base class for Mutual description instance. 
Outcomes are represented", "by cursor in common.py file in `Mutual_description` self.column = \"\" def histogram(self, plot_number):", "# call for coefficient of variation from feature distribution return scipy.stats.variation(self.dataset[self.column]) def gini_coefficient(self):", "feature distribution return np.std(self.dataset[self.column]) def absolute_deviation_from_mean(self): # call for absolute deviation from mean", "from feature distribution return np.mean(self.dataset[self.column]) def median(self): # call for median from feature", "def average(self): # TODO: remove return np.average(self.dataset[self.column]) def expected_value(self): # call for expected", "plt.show() def measurement(self): # call for measurement category of the feature # possible", "histogram and save as a static file # size and ticks are adjusted", "12) ax=sns.distplot(self.dataset.iloc[:, [plot_number]], rug=True, color='k') fig.patch.set_alpha(0.0) plt.xticks(fontsize=25) plt.yticks(fontsize=25) fig.savefig('static/plot{}.png'.format(plot_number + 1), dpi=fig.dpi) #", "measurement(self): # call for measurement category of the feature # possible outcomes are:", "call for standard deviation from feature distribution return np.std(self.dataset[self.column]) def absolute_deviation_from_mean(self): # call", "# Handled by cursor in common.py file in `Mutual_description` self.column = \"\" def", "= \"\" def histogram(self, plot_number): # Generate histogram and save as a static", "scipy.stats.median_absolute_deviation(self.dataset[self.column]) def quarter_deviation(self): # call for quarter devaition from feature distribution q75, q25", "coefficient_of_variation(self): # call for coefficient of variation from feature distribution return scipy.stats.variation(self.dataset[self.column]) def", "def measurement(self): # call for measurement category of the feature # possible outcomes", "def asymmetry_factor(self): # call for asymmetry factor from feature distribution return 
scipy.stats.skew(self.dataset[self.column]) def", "in each histogram plot in the page. Class covers the most general feature", "return np.median(self.dataset[self.column]) def mode(self): # call for mode from feature distribution return scipy.stats.mode(self.dataset[self.column])", "pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn", "from feature distribution # TODO: refactorize mad = np.abs(np.subtract.outer(self.dataset[self.column], self.dataset[self.column])).mean() rmad = mad/np.mean(self.dataset[self.column])", "self.dataset[self.column].values: if float(value) != int(value): return 'quantitive continous' if len(pd.unique(self.dataset[self.column])) == 2: return", "mode(self): # call for mode from feature distribution return scipy.stats.mode(self.dataset[self.column]) def standard_deviation(self): #", "q75, q25 = np.percentile(self.dataset[self.column], [75 ,25]) return (q75 - q25) def coefficient_of_variation(self): #", "feature distribution q75, q25 = np.percentile(self.dataset[self.column], [75 ,25]) return (q75 - q25) def", "deviation from feature distribution return np.std(self.dataset[self.column]) def absolute_deviation_from_mean(self): # call for absolute deviation", "set. Base class for Mutual description instance. 
Outcomes are represented from the beggining", "seaborn as sns import scipy from statsmodels import robust class Singular_description(object): ''' Display", "call for asymmetry factor from feature distribution return scipy.stats.skew(self.dataset[self.column]) def entropy(self): # call", "Handled by cursor in common.py file in `Mutual_description` self.column = \"\" def histogram(self,", "call for expected value from feature distribution return np.mean(self.dataset[self.column]) def median(self): # call", "size sns.set_style(\"whitegrid\") fig, ax = plt.subplots() fig.set_size_inches(12, 12) ax=sns.distplot(self.dataset.iloc[:, [plot_number]], rug=True, color='k') fig.patch.set_alpha(0.0)", "scipy.stats.variation(self.dataset[self.column]) def gini_coefficient(self): # call for gini coefficient from feature distribution # TODO:", "call for measurement category of the feature # possible outcomes are: # --", "# -- quantitive discrete categorical # -- quantitive discrete numerical if self.dataset[self.column].dtypes ==", "def __init__(self): # Handled by cursor in common.py file in `Mutual_description` self.column =", "distribution # TODO: refactorize mad = np.abs(np.subtract.outer(self.dataset[self.column], self.dataset[self.column])).mean() rmad = mad/np.mean(self.dataset[self.column]) return 0.5", "represented from the beggining (after hoover), in each histogram plot in the page.", "Base class for Mutual description instance. 
Outcomes are represented from the beggining (after", "from mean from feature distribution return np.mean(np.absolute(self.dataset[self.column] - np.mean(self.dataset[self.column]))) def absolute_deviation_from_median(self): # call", "def coefficient_of_variation(self): # call for coefficient of variation from feature distribution return scipy.stats.variation(self.dataset[self.column])", "for absolute deviation from mean from feature distribution return np.mean(np.absolute(self.dataset[self.column] - np.mean(self.dataset[self.column]))) def", "matplotlib.pyplot as plt import seaborn as sns import scipy from statsmodels import robust", "with accordance to display size sns.set_style(\"whitegrid\") fig, ax = plt.subplots() fig.set_size_inches(12, 12) ax=sns.distplot(self.dataset.iloc[:,", "return scipy.stats.skew(self.dataset[self.column]) def entropy(self): # call for entropy from feature distribution return scipy.stats.entropy(self.dataset[self.column])", "from feature distribution return scipy.stats.median_absolute_deviation(self.dataset[self.column]) def quarter_deviation(self): # call for quarter devaition from", "refactorize mad = np.abs(np.subtract.outer(self.dataset[self.column], self.dataset[self.column])).mean() rmad = mad/np.mean(self.dataset[self.column]) return 0.5 * rmad def", "call for quarter devaition from feature distribution q75, q25 = np.percentile(self.dataset[self.column], [75 ,25])", "hoover), in each histogram plot in the page. 
Class covers the most general", "plt.xticks(fontsize=25) plt.yticks(fontsize=25) fig.savefig('static/plot{}.png'.format(plot_number + 1), dpi=fig.dpi) # return fig # plt.show() def measurement(self):", "mad/np.mean(self.dataset[self.column]) return 0.5 * rmad def asymmetry_factor(self): # call for asymmetry factor from", "standard deviation from feature distribution return np.std(self.dataset[self.column]) def absolute_deviation_from_mean(self): # call for absolute", "mean from feature distribution return np.mean(np.absolute(self.dataset[self.column] - np.mean(self.dataset[self.column]))) def absolute_deviation_from_median(self): # call for", "return np.mean(np.absolute(self.dataset[self.column] - np.mean(self.dataset[self.column]))) def absolute_deviation_from_median(self): # call for mode from feature distribution", "import seaborn as sns import scipy from statsmodels import robust class Singular_description(object): '''", "# call for median from feature distribution return np.median(self.dataset[self.column]) def mode(self): # call", "np.mean(self.dataset[self.column]))) def absolute_deviation_from_median(self): # call for mode from feature distribution return scipy.stats.median_absolute_deviation(self.dataset[self.column]) def", "# -- quantitive continous # -- quantitive discrete categorical # -- quantitive discrete", "Display statistics from every numerical column in data set. Base class for Mutual", "mode from feature distribution return scipy.stats.mode(self.dataset[self.column]) def standard_deviation(self): # call for standard deviation", "from every numerical column in data set. 
Base class for Mutual description instance.", "return np.mean(self.dataset[self.column]) def median(self): # call for median from feature distribution return np.median(self.dataset[self.column])", "file in `Mutual_description` self.column = \"\" def histogram(self, plot_number): # Generate histogram and", "discrete categorical' else: return 'quantitive discrete numerical' def average(self): # TODO: remove return", "call for mode from feature distribution return scipy.stats.mode(self.dataset[self.column]) def standard_deviation(self): # call for", "continous # -- quantitive discrete categorical # -- quantitive discrete numerical if self.dataset[self.column].dtypes", "in common.py file in `Mutual_description` self.column = \"\" def histogram(self, plot_number): # Generate", "are adjusted with accordance to display size sns.set_style(\"whitegrid\") fig, ax = plt.subplots() fig.set_size_inches(12,", "for value in self.dataset[self.column].values: if float(value) != int(value): return 'quantitive continous' if len(pd.unique(self.dataset[self.column]))", "general feature statistics used in data analysis. ''' def __init__(self): # Handled by", "in `Mutual_description` self.column = \"\" def histogram(self, plot_number): # Generate histogram and save", "and ticks are adjusted with accordance to display size sns.set_style(\"whitegrid\") fig, ax =", "feature distribution # TODO: refactorize mad = np.abs(np.subtract.outer(self.dataset[self.column], self.dataset[self.column])).mean() rmad = mad/np.mean(self.dataset[self.column]) return", "fig, ax = plt.subplots() fig.set_size_inches(12, 12) ax=sns.distplot(self.dataset.iloc[:, [plot_number]], rug=True, color='k') fig.patch.set_alpha(0.0) plt.xticks(fontsize=25) plt.yticks(fontsize=25)", "# call for gini coefficient from feature distribution # TODO: refactorize mad =", "beggining (after hoover), in each histogram plot in the page. 
Class covers the", "size and ticks are adjusted with accordance to display size sns.set_style(\"whitegrid\") fig, ax", "call for median from feature distribution return np.median(self.dataset[self.column]) def mode(self): # call for", "from feature distribution return scipy.stats.mode(self.dataset[self.column]) def standard_deviation(self): # call for standard deviation from", "feature # possible outcomes are: # -- quantitive continous # -- quantitive discrete", "np.mean(self.dataset[self.column]) def median(self): # call for median from feature distribution return np.median(self.dataset[self.column]) def", "display size sns.set_style(\"whitegrid\") fig, ax = plt.subplots() fig.set_size_inches(12, 12) ax=sns.distplot(self.dataset.iloc[:, [plot_number]], rug=True, color='k')", "a static file # size and ticks are adjusted with accordance to display", "cursor in common.py file in `Mutual_description` self.column = \"\" def histogram(self, plot_number): #", "value in self.dataset[self.column].values: if float(value) != int(value): return 'quantitive continous' if len(pd.unique(self.dataset[self.column])) ==", "feature distribution return scipy.stats.skew(self.dataset[self.column]) def entropy(self): # call for entropy from feature distribution", "pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns", "# plt.show() def measurement(self): # call for measurement category of the feature #", "from statsmodels import robust class Singular_description(object): ''' Display statistics from every numerical column", "scipy from statsmodels import robust class Singular_description(object): ''' Display statistics from every numerical", "static file # size and ticks are adjusted with accordance to display size", "mad = np.abs(np.subtract.outer(self.dataset[self.column], self.dataset[self.column])).mean() rmad = mad/np.mean(self.dataset[self.column]) return 0.5 * rmad def asymmetry_factor(self):", "measurement category of the feature # possible outcomes are: # 
-- quantitive continous", "every numerical column in data set. Base class for Mutual description instance. Outcomes", "def absolute_deviation_from_mean(self): # call for absolute deviation from mean from feature distribution return", "# size and ticks are adjusted with accordance to display size sns.set_style(\"whitegrid\") fig,", "np.std(self.dataset[self.column]) def absolute_deviation_from_mean(self): # call for absolute deviation from mean from feature distribution", "return np.std(self.dataset[self.column]) def absolute_deviation_from_mean(self): # call for absolute deviation from mean from feature", "# Generate histogram and save as a static file # size and ticks", "q25) def coefficient_of_variation(self): # call for coefficient of variation from feature distribution return", "-- quantitive discrete categorical # -- quantitive discrete numerical if self.dataset[self.column].dtypes == 'float64':", "in the page. Class covers the most general feature statistics used in data", "page. Class covers the most general feature statistics used in data analysis. '''", "fig.patch.set_alpha(0.0) plt.xticks(fontsize=25) plt.yticks(fontsize=25) fig.savefig('static/plot{}.png'.format(plot_number + 1), dpi=fig.dpi) # return fig # plt.show() def", "plt.subplots() fig.set_size_inches(12, 12) ax=sns.distplot(self.dataset.iloc[:, [plot_number]], rug=True, color='k') fig.patch.set_alpha(0.0) plt.xticks(fontsize=25) plt.yticks(fontsize=25) fig.savefig('static/plot{}.png'.format(plot_number + 1),", "from the beggining (after hoover), in each histogram plot in the page. 
Class", "len(pd.unique(self.dataset[self.column])) == 2: return 'quantitive discrete categorical' else: return 'quantitive discrete numerical' def", "# call for standard deviation from feature distribution return np.std(self.dataset[self.column]) def absolute_deviation_from_mean(self): #", "-- quantitive continous # -- quantitive discrete categorical # -- quantitive discrete numerical", "class for Mutual description instance. Outcomes are represented from the beggining (after hoover),", "used in data analysis. ''' def __init__(self): # Handled by cursor in common.py", "value from feature distribution return np.mean(self.dataset[self.column]) def median(self): # call for median from", "import scipy from statsmodels import robust class Singular_description(object): ''' Display statistics from every", "sns.set_style(\"whitegrid\") fig, ax = plt.subplots() fig.set_size_inches(12, 12) ax=sns.distplot(self.dataset.iloc[:, [plot_number]], rug=True, color='k') fig.patch.set_alpha(0.0) plt.xticks(fontsize=25)", "absolute_deviation_from_mean(self): # call for absolute deviation from mean from feature distribution return np.mean(np.absolute(self.dataset[self.column]", "most general feature statistics used in data analysis. ''' def __init__(self): # Handled", "distribution q75, q25 = np.percentile(self.dataset[self.column], [75 ,25]) return (q75 - q25) def coefficient_of_variation(self):", "# call for expected value from feature distribution return np.mean(self.dataset[self.column]) def median(self): #", "as sns import scipy from statsmodels import robust class Singular_description(object): ''' Display statistics", "class Singular_description(object): ''' Display statistics from every numerical column in data set. Base", "in data set. Base class for Mutual description instance. 
Outcomes are represented from", "Generate histogram and save as a static file # size and ticks are", "are represented from the beggining (after hoover), in each histogram plot in the", "gini coefficient from feature distribution # TODO: refactorize mad = np.abs(np.subtract.outer(self.dataset[self.column], self.dataset[self.column])).mean() rmad", "= np.percentile(self.dataset[self.column], [75 ,25]) return (q75 - q25) def coefficient_of_variation(self): # call for", "distribution return np.std(self.dataset[self.column]) def absolute_deviation_from_mean(self): # call for absolute deviation from mean from", "TODO: refactorize mad = np.abs(np.subtract.outer(self.dataset[self.column], self.dataset[self.column])).mean() rmad = mad/np.mean(self.dataset[self.column]) return 0.5 * rmad", "fig # plt.show() def measurement(self): # call for measurement category of the feature", "import matplotlib.pyplot as plt import seaborn as sns import scipy from statsmodels import", "`Mutual_description` self.column = \"\" def histogram(self, plot_number): # Generate histogram and save as", "np.percentile(self.dataset[self.column], [75 ,25]) return (q75 - q25) def coefficient_of_variation(self): # call for coefficient", "# call for asymmetry factor from feature distribution return scipy.stats.skew(self.dataset[self.column]) def entropy(self): #", "from feature distribution return np.median(self.dataset[self.column]) def mode(self): # call for mode from feature", "= np.abs(np.subtract.outer(self.dataset[self.column], self.dataset[self.column])).mean() rmad = mad/np.mean(self.dataset[self.column]) return 0.5 * rmad def asymmetry_factor(self): #", "feature distribution return np.mean(self.dataset[self.column]) def median(self): # call for median from feature distribution", "numpy as np import matplotlib.pyplot as plt import seaborn as sns import scipy", "for expected value from feature distribution return np.mean(self.dataset[self.column]) def median(self): # call for", "from feature 
distribution return scipy.stats.variation(self.dataset[self.column]) def gini_coefficient(self): # call for gini coefficient from", "of the feature # possible outcomes are: # -- quantitive continous # --", "distribution return np.mean(self.dataset[self.column]) def median(self): # call for median from feature distribution return", "return 'quantitive discrete numerical' def average(self): # TODO: remove return np.average(self.dataset[self.column]) def expected_value(self):", "for Mutual description instance. Outcomes are represented from the beggining (after hoover), in", "np.mean(np.absolute(self.dataset[self.column] - np.mean(self.dataset[self.column]))) def absolute_deviation_from_median(self): # call for mode from feature distribution return", "expected value from feature distribution return np.mean(self.dataset[self.column]) def median(self): # call for median", "__init__(self): # Handled by cursor in common.py file in `Mutual_description` self.column = \"\"", "instance. Outcomes are represented from the beggining (after hoover), in each histogram plot", "plt import seaborn as sns import scipy from statsmodels import robust class Singular_description(object):", "'quantitive discrete numerical' def average(self): # TODO: remove return np.average(self.dataset[self.column]) def expected_value(self): #", "plt.yticks(fontsize=25) fig.savefig('static/plot{}.png'.format(plot_number + 1), dpi=fig.dpi) # return fig # plt.show() def measurement(self): #", "q25 = np.percentile(self.dataset[self.column], [75 ,25]) return (q75 - q25) def coefficient_of_variation(self): # call", "for mode from feature distribution return scipy.stats.median_absolute_deviation(self.dataset[self.column]) def quarter_deviation(self): # call for quarter", "for median from feature distribution return np.median(self.dataset[self.column]) def mode(self): # call for mode", "(q75 - q25) def coefficient_of_variation(self): # call for coefficient of variation from feature", 
"np.abs(np.subtract.outer(self.dataset[self.column], self.dataset[self.column])).mean() rmad = mad/np.mean(self.dataset[self.column]) return 0.5 * rmad def asymmetry_factor(self): # call", "rug=True, color='k') fig.patch.set_alpha(0.0) plt.xticks(fontsize=25) plt.yticks(fontsize=25) fig.savefig('static/plot{}.png'.format(plot_number + 1), dpi=fig.dpi) # return fig #", "Class covers the most general feature statistics used in data analysis. ''' def", "possible outcomes are: # -- quantitive continous # -- quantitive discrete categorical #", "histogram plot in the page. Class covers the most general feature statistics used", "feature distribution return np.mean(np.absolute(self.dataset[self.column] - np.mean(self.dataset[self.column]))) def absolute_deviation_from_median(self): # call for mode from", "# possible outcomes are: # -- quantitive continous # -- quantitive discrete categorical", "quantitive discrete numerical if self.dataset[self.column].dtypes == 'float64': for value in self.dataset[self.column].values: if float(value)", "and save as a static file # size and ticks are adjusted with", "= plt.subplots() fig.set_size_inches(12, 12) ax=sns.distplot(self.dataset.iloc[:, [plot_number]], rug=True, color='k') fig.patch.set_alpha(0.0) plt.xticks(fontsize=25) plt.yticks(fontsize=25) fig.savefig('static/plot{}.png'.format(plot_number +", "1), dpi=fig.dpi) # return fig # plt.show() def measurement(self): # call for measurement", "statistics used in data analysis. ''' def __init__(self): # Handled by cursor in", "mode from feature distribution return scipy.stats.median_absolute_deviation(self.dataset[self.column]) def quarter_deviation(self): # call for quarter devaition", "self.column = \"\" def histogram(self, plot_number): # Generate histogram and save as a", "# return fig # plt.show() def measurement(self): # call for measurement category of" ]
[ "= docs.cpu().data.numpy() ques = ques.cpu().data.numpy() if ex_counter == 0: documents = docs questions", "params['feature_dict'] except BaseException: logger.warn('[ WARN: Saving failed... continuing anyway. ]') # ------------------------------------------------------------------------------ #", "args.domain, \"test/\") logger.info(\"Printing vectors at {}\".format(OUT_DIR)) if not os.path.exists(OUT_DIR): os.makedirs(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True)", "inputs = [e if e is None or type(e) != type(ex[0]) else Variable(e.cuda(async=True))", "args.test == 1: fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.test_file_name), \"rb\") all_test_exs =", "{}'.format(args.model_file)) ## check pointing## save(args, ret_model.model, optimizer, args.model_file+\".ckpt\", epoch=stats['epoch']) logger.info(\"Evaluating on the full", "* 1.0 / total_num_questions), (top_5 * 1.0 / total_num_questions))) ## saving code if", "documents.shape[0] assert questions.shape[0] == documents.shape[0] assert len(cum_num_lens) == len(qid2idx) assert len(cum_num_lens) == len(all_correct_answers)", "/ total_num_questions), (top_5 * 1.0 / total_num_questions) def save(args, model, optimizer, filename, epoch=None):", "def save_vectors(args, ret_model, corpus, data_loader, verified_dev_loader=None, save_scores = True, train=False, test=False): total_exs =", "can't test if args.small == 1: args.test = 0 if args.small == 1:", "# Set logging logger.setLevel(logging.INFO) fmt = logging.Formatter('%(asctime)s: %(message)s', '%m/%d/%Y %I:%M:%S %p') console =", "test=False): all_question_vectors = [] all_para_vectors = [] qid2idx = {} cum_num_lens = []", "num_paras[i] cum_num_lens.append(sum_num_paras) all_correct_answers[map_counter] = [] st = sum(num_paras[:i]) for j in range(num_paras[i]): if", "except BaseException: logger.warn('[ WARN: Saving failed... continuing anyway. 
]') # ------------------------------------------------------------------------------ # Main.", "BrokenPipeError inputs = [e if e is None or type(e) != type(ex[0]) else", "%s' % args.optimizer) else: pass return ret, optimizer, word_dict, feature_dict def train_binary_classification(args, ret_model,", "= params['feature_dict'] except BaseException: logger.warn('[ WARN: Saving failed... continuing anyway. ]') # ------------------------------------------------------------------------------", "'best_acc': 0, 'best_verified_acc': 0} def make_data_loader(args, corpus, train_time=False): dataset = data.MultiCorpusDataset( args, corpus,", "state np.random.seed(args.random_seed) torch.manual_seed(args.random_seed) if args.cuda: torch.cuda.manual_seed(args.random_seed) # Set logging logger.setLevel(logging.INFO) fmt = logging.Formatter('%(asctime)s:", "torch.utils.data.DataLoader( dataset, batch_size=args.batch_size, sampler=sampler, num_workers=args.data_workers, collate_fn=vector.batchify(args, args.para_mode, train_time=train_time), pin_memory=True ) return loader def", "ex_counter == 0: documents = docs questions = ques else: documents = np.concatenate([documents,", "labels = labels.data.numpy() scores = scores.cpu().data.numpy() scores = scores.reshape((-1)) if save_scores: for i,", "* 100) logger.info('CONFIG:\\n%s' % json.dumps(vars(args), indent=4, sort_keys=True)) # small can't test if args.small", "RuntimeError: import pdb pdb.set_trace() num_paras = ex[1] qids = ex[-1] if args.save_para_clf_output: docs", "def get_topk_tfidf(corpus): top1 = 0 top3 = 0 top5 = 0 for qid", "+= 1 if sum([ans[1] for ans in sorted_para_scores[:3]]) > 0: top3 += 1", "epoch=None): params = { 'state_dict': { 'para_clf': model.state_dict(), 'optimizer': optimizer.state_dict() }, 'word_dict': args.word_dict,", "in sorted_para_scores[:5]]) > 0: top5 += 1 logger.info( 'top1 = {}, top3 =", "'.format(top1 / len(corpus.questions), top3 / len(corpus.questions), top5 / len(corpus.questions))) def 
run_predictions(args, data_loader, model,", "{}'.format(stats['best_acc'])) logger.info('Saving model at {}'.format(args.model_file)) logger.info(\"Logs saved at {}\".format(args.log_file)) save(args, ret_model.model, optimizer, args.model_file,", "vector from model.retriever import LSTMRetriever from multi_corpus import MultiCorpus from torch.utils.data.sampler import SequentialSampler,", "model.return_topk(5,*ret_input) except RuntimeError: import pdb pdb.set_trace() num_paras = ex[1] qids = ex[-1] if", "in ex[:]] ret_input = [*inputs[:4]] total_exs += ex[0].size(0) scores, _, _ = ret_model.score_paras(*ret_input)", "word_dict, feature_dict def train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None): args.train_time = True para_loss =", "else make_data_loader(args, all_train_exs, train_time=True) dev_loader = make_data_loader(args, all_dev_exs) if args.test: test_loader = make_data_loader(args,", "all_cumlen = np.array(cum_num_lens) np.save(OUT_DIR + \"document\", all_para_vectors) np.save(OUT_DIR + \"question\", all_question_vectors) np.save(OUT_DIR +", "{:2.4f}, top-3: {:2.4f}, top-5: {:2.4f}'.format( (top_1 * 1.0 / total_num_questions), (top_3 * 1.0", "# ------------------------------------------------------------------------------ def main(args): # PRINT CONFIG logger.info('-' * 100) logger.info('CONFIG:\\n%s' % json.dumps(vars(args),", "utils, data, vector from model.retriever import LSTMRetriever from multi_corpus import MultiCorpus from torch.utils.data.sampler", "import LSTMRetriever from multi_corpus import MultiCorpus from torch.utils.data.sampler import SequentialSampler, RandomSampler import math", "(top_3 * 1.0 / total_num_questions), (top_5 * 1.0 / total_num_questions))) ## saving code", "all_train_exs, train_loader, verified_dev_loader=None, train=True) if args.test: args.is_test = 1 logger.info(\"Saving test paragraph vectors\")", "all_question_vectors.shape[0] == len(cum_num_lens) assert 
all_question_vectors.shape[0] == len(qid2idx) assert all_question_vectors.shape[0] == len(all_correct_ans) ## saving", "loader logger.info(\"Making data loaders...\") if word_dict == None: args.word_dict = utils.build_word_dict(args, (all_train_exs, all_dev_exs))", "all_question_vectors.shape[0] == len(qid2idx) assert all_question_vectors.shape[0] == len(all_correct_ans) ## saving code if train: OUT_DIR", "top1 = top1/len(corpus.questions) top3 = top3/len(corpus.questions) top5 = top5/len(corpus.questions) logger.info('top1 = {}, top3", "if sum([ans[1] for ans in sorted_para_scores[:5]]) > 0: top5 += 1 top1 =", "| iter={}/{} | para loss = {:2.4f}'.format( stats['epoch'], idx, len(train_loader), para_loss.avg)) para_loss.reset() def", "+ 'correct_paras.json', 'w')) all_cumlen = np.array(cum_num_lens) np.save(OUT_DIR + \"document\", documents) np.save(OUT_DIR + \"question\",", "args.domain, args.train_file_name), \"rb\") all_train_exs = pickle.load(fin) fin.close() fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain,", "labels.cuda() # BCE logits loss batch_para_loss = F.binary_cross_entropy_with_logits(scores.squeeze(1), labels) optimizer.zero_grad() batch_para_loss.backward() torch.nn.utils.clip_grad_norm(ret_model.get_trainable_params(), 2.0)", "model, eval_on_train_set=False): args.train_time = False top_1 = 0 top_3 = 0 top_5 =", "test if args.small == 1: args.test = 0 if args.small == 1: args.train_file_name", "idx, ex in enumerate(tqdm(dev_loader)): if ex is None: raise BrokenPipeError inputs = [e", "== 1: args.test = 0 if args.small == 1: args.train_file_name = args.train_file_name +", "test examples {}\".format(len(all_test_exs.paragraphs))) if args.pretrained is None: ret_model, optimizer, word_dict, feature_dict = init_from_scratch(args,", "key=lambda x: x[0]) # import pdb # pdb.set_trace() if sorted_para_scores[0][1] > 0: top1", "total_num_questions))) ## saving code if args.save_para_clf_output: if eval_on_train_set: OUT_DIR = 
\"/iesl/canvas/sdhuliawala/vectors_web/train/\" else: OUT_DIR", "== 0: documents = docs questions = ques else: documents = np.concatenate([documents, docs])", "epoch: params['epoch'] = epoch try: torch.save(params, filename) # bad hack for not saving", "for qid in corpus.questions: para_scores = [(corpus.paragraphs[pid].tfidf_score, corpus.paragraphs[pid].ans_occurance) for pid in corpus.questions[qid].pids] sorted_para_scores", "verified_dev_loader=None): args.train_time = True para_loss = utils.AverageMeter() ret_model.model.train() for idx, ex in enumerate(train_loader):", "filename) # bad hack for not saving dictionary twice args.word_dict = params['word_dict'] args.feature_dict", "e in ex[:]] ret_input = [*inputs[:4]] total_exs += ex[0].size(0) scores, doc, ques =", "ret_model.model, optimizer, args.model_file, epoch=stats['epoch']) if __name__ == '__main__': # MODEL logger.info('-' * 100)", "args.feature_dict = params['feature_dict'] except BaseException: logger.warn('[ WARN: Saving failed... continuing anyway. 
]') #", "top_1 = 0 top_3 = 0 top_5 = 0 total_num_questions = 0 map_counter", "not os.path.exists(OUT_DIR): os.makedirs(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.makedirs(OUT_DIR) json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w')) json.dump(all_correct_ans,", "code if train: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"train/\") else: if args.is_test ==", "0 args.train_time = False ret_model.model.eval() accuracy = 0.0 for idx, ex in enumerate(tqdm(dev_loader)):", "cum_num_len = 0 for question_i, qid in enumerate(corpus.questions): labels = [] all_question_vectors.append(question_vectors[qid]) qid2idx[qid]", "logger.info(\"Logs saved at {}\".format(args.log_file)) save(args, ret_model.model, optimizer, args.model_file, epoch=stats['epoch']) if __name__ == '__main__':", "top_1 += 1 break counter += num_paras[q_counter] logger.info('Accuracy of para classifier when evaluated", "verified_dev_loader=None) logger.info('checkpointing model at {}'.format(args.model_file)) ## check pointing## save(args, ret_model.model, optimizer, args.model_file+\".ckpt\", epoch=stats['epoch'])", "/ len(corpus.questions))) def run_predictions(args, data_loader, model, eval_on_train_set=False): args.train_time = False top_1 = 0", "if i <= 2: top_3 += 1 if i <= 0: top_1 +=", "case: assert len(all_correct_answers[map_counter]) == sum(labels.data.numpy()[st: st + num_paras[i]]) map_counter += 1 counter =", "labels accuracy += a.sum() logger.info('Eval accuracy = {} '.format(accuracy/total_exs)) top1 = get_topk(corpus) return", "args.eval_only: logger.info(\"Saving dev paragraph vectors\") save_vectors(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None) logger.info(\"Saving train paragraph", "{}, top5 = {} '.format(top1 / len(corpus.questions), top3 / len(corpus.questions), top5 / len(corpus.questions)))", "args and setup environment args = config.get_args() # Set cuda args.cuda = not", "len(corpus.questions[qid].pids) 
cum_num_lens.append(cum_num_len) for para_i, pid in enumerate(corpus.questions[qid].pids): if corpus.paragraphs[pid].ans_occurance > 0: labels.append(para_i) all_para_vectors.append(para_vectors[pid])", "None: ret_model, optimizer, word_dict, feature_dict = init_from_scratch(args, all_train_exs) else: ret_model, optimizer, word_dict, feature_dict", "train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None): args.train_time = True para_loss = utils.AverageMeter() ret_model.model.train() for", "None: args.word_dict = utils.build_word_dict(args, (all_train_exs, all_dev_exs)) word_dict = args.word_dict train_loader = make_data_loader(args, all_train_exs,", "init_from_scratch(args, train_exs): logger.info('Initializing model from scratch') word_dict = feature_dict = None # create", "len(corpus.questions))) def run_predictions(args, data_loader, model, eval_on_train_set=False): args.train_time = False top_1 = 0 top_3", "+ 'map.json', 'w')) json.dump(all_correct_answers, open(OUT_DIR + 'correct_paras.json', 'w')) all_cumlen = np.array(cum_num_lens) np.save(OUT_DIR +", "para_vectors = {} question_vectors = {} for idx, ex in enumerate(tqdm(data_loader)): if ex", "ex[0].size(0) scores, _, _ = ret_model.score_paras(*ret_input) scores = F.sigmoid(scores) y_num_occurrences = Variable(ex[-2]) labels", "= args.test_file_name + \"_small\" args.train_file_name = args.train_file_name + \".pkl\" args.dev_file_name = args.dev_file_name +", "total_num_questions), (top_3 * 1.0 / total_num_questions), (top_5 * 1.0 / total_num_questions))) ## saving", "saving dictionary twice args.word_dict = params['word_dict'] args.feature_dict = params['feature_dict'] except BaseException: logger.warn('[ WARN:", "torch.optim as optim import torch.nn.functional as F from torch.utils.data.sampler import RandomSampler import config", "qid2idx = {} sum_num_paras = 0 all_correct_answers = {} for ex_counter, ex in", "0 for qid in corpus.questions: para_scores = 
[(corpus.paragraphs[pid].tfidf_score, corpus.paragraphs[pid].ans_occurance) for pid in corpus.questions[qid].pids]", "+ \".pkl\" if args.test == 1: args.test_file_name = args.test_file_name + \".pkl\" logger.info(\"Loading pickle", "torch.cuda.manual_seed(args.random_seed) # Set logging logger.setLevel(logging.INFO) fmt = logging.Formatter('%(asctime)s: %(message)s', '%m/%d/%Y %I:%M:%S %p') console", "> 0: top5 += 1 logger.info( 'top1 = {}, top3 = {}, top5", "args.optimizer) else: pass return ret, optimizer, word_dict, feature_dict def train_binary_classification(args, ret_model, optimizer, train_loader,", "question_vectors: question_vectors[qid] = ques[i] for i, pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score = scores[i] get_topk(corpus)", "= utils.build_word_dict(args, (all_train_exs, all_dev_exs)) word_dict = args.word_dict train_loader = make_data_loader(args, all_train_exs, train_time=False) if", "twice args.word_dict = params['word_dict'] args.feature_dict = params['feature_dict'] except BaseException: logger.warn('[ WARN: Saving failed...", "open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.dev_file_name), \"rb\") all_dev_exs = pickle.load(fin) fin.close() if args.test ==", "= top1/len(corpus.questions) top3 = top3/len(corpus.questions) top5 = top5/len(corpus.questions) logger.info('top1 = {}, top3 =", "break counter += num_paras[q_counter] logger.info('Accuracy of para classifier when evaluated on the annotated", "= init_from_scratch(args, all_train_exs) else: ret_model, optimizer, word_dict, feature_dict = init_from_checkpoint(args) # make data", "random state np.random.seed(args.random_seed) torch.manual_seed(args.random_seed) if args.cuda: torch.cuda.manual_seed(args.random_seed) # Set logging logger.setLevel(logging.INFO) fmt =", "== cum_num_lens[-1] assert all_question_vectors.shape[0] == len(cum_num_lens) assert all_question_vectors.shape[0] == len(qid2idx) assert all_question_vectors.shape[0] ==", "except 
RuntimeError: import pdb pdb.set_trace() num_paras = ex[1] qids = ex[-1] if args.save_para_clf_output:", "saved checkpoint {}'.format(args.pretrained)) model = torch.load(args.pretrained) word_dict = model['word_dict'] feature_dict = model['feature_dict'] args.vocab_size", "labels[counter + no_paras ] ==1: if i <= 4: top_5 += 1 if", "= feature_dict ret = LSTMRetriever(args, word_dict, feature_dict) # -------------------------------------------------------------------------- # TRAIN/VALID LOOP #", "load saved param values ret.model.load_state_dict(model['state_dict']['para_clf']) optimizer = None parameters = ret.get_trainable_params() if args.optimizer", "weight_decay=args.weight_decay) else: raise RuntimeError('Unsupported optimizer: %s' % args.optimizer) optimizer.load_state_dict(model['state_dict']['optimizer']) logger.info('Model loaded...') return ret,", "== 1: logger.info(\"Num test examples {}\".format(len(all_test_exs.paragraphs))) if args.pretrained is None: ret_model, optimizer, word_dict,", "import math logger = logging.getLogger() global_timer = utils.Timer() stats = {'timer': global_timer, 'epoch':", "raise BrokenPipeError inputs = [e if e is None or type(e) != type(ex[0])", "+ \"all_cumlen\", cum_num_lens) def save_vectors(args, ret_model, corpus, data_loader, verified_dev_loader=None, save_scores = True, train=False,", "from torch.utils.data.sampler import SequentialSampler, RandomSampler import math logger = logging.getLogger() global_timer = utils.Timer()", "and setup environment args = config.get_args() # Set cuda args.cuda = not args.no_cuda", "args.dev_file_name + \"_small\" if args.test == 1: args.test_file_name = args.test_file_name + \"_small\" args.train_file_name", "vectors\") save_vectors(args, ret_model, all_test_exs, test_loader, verified_dev_loader=None) else: get_topk_tfidf(all_dev_exs) for epoch in range(args.num_epochs): stats['epoch']", "idx, ex in enumerate(tqdm(data_loader)): if ex is None: raise BrokenPipeError inputs = [e", 
"True): total_exs = 0 args.train_time = False ret_model.model.eval() accuracy = 0.0 for idx,", "+ \"question\", questions) np.save(OUT_DIR + \"all_cumlen\", all_cumlen) return (top_1 * 1.0 / total_num_questions),", "= logging.Formatter('%(asctime)s: %(message)s', '%m/%d/%Y %I:%M:%S %p') console = logging.StreamHandler() console.setFormatter(fmt) logger.addHandler(console) if args.log_file:", "e is None or type(e) != type(ex[0]) else Variable(e.cuda(async=True)) for e in ex[:]]", "y_num_occurrences = ex[3] labels = (y_num_occurrences > 0) try: topk_paras, docs, ques =", "args.word_dict = utils.build_word_dict(args, (all_train_exs, all_dev_exs)) word_dict = args.word_dict train_loader = make_data_loader(args, all_train_exs, train_time=False)", "type(ex[0]) else Variable(e.cuda(async=True)) for e in ex[:]] ret_input = [*inputs[:4]] total_exs += ex[0].size(0)", "\"question\", all_question_vectors) np.save(OUT_DIR + \"all_cumlen\", cum_num_lens) def save_vectors(args, ret_model, corpus, data_loader, verified_dev_loader=None, save_scores", "is None or type(e) != type(ex[0]) else Variable(e.cuda(async=True)) for e in ex[:]] ret_input", "[] qid2idx = {} sum_num_paras = 0 all_correct_answers = {} for ex_counter, ex", "+ \"_small\" args.dev_file_name = args.dev_file_name + \"_small\" if args.test == 1: args.test_file_name =", "labels = labels.cuda() # BCE logits loss batch_para_loss = F.binary_cross_entropy_with_logits(scores.squeeze(1), labels) optimizer.zero_grad() batch_para_loss.backward()", "in sorted_para_scores[:3]]) > 0: top3 += 1 if sum([ans[1] for ans in sorted_para_scores[:5]])", "Set cuda args.cuda = not args.no_cuda and torch.cuda.is_available() if args.cuda: torch.cuda.set_device(args.gpu) # Set", "'a') else: logfile = logging.FileHandler(args.log_file, 'w') logfile.setFormatter(fmt) logger.addHandler(logfile) logger.info('[ COMMAND: %s ]' %", "= SequentialSampler(dataset) if not train_time else RandomSampler(dataset) loader = torch.utils.data.DataLoader( 
dataset, batch_size=args.batch_size, sampler=sampler,", "scores = F.sigmoid(scores) y_num_occurrences = Variable(ex[-2]) labels = (y_num_occurrences > 0).float() labels =", "as optim import torch.nn.functional as F from torch.utils.data.sampler import RandomSampler import config from", "= scores.cpu().data.numpy() scores = scores.reshape((-1)) if save_scores: for i, pid in enumerate(ex[-1]): para_vectors[pid]", "optimizer, word_dict, feature_dict = init_from_checkpoint(args) # make data loader logger.info(\"Making data loaders...\") if", "return loader def init_from_checkpoint(args): logger.info('Loading model from saved checkpoint {}'.format(args.pretrained)) model = torch.load(args.pretrained)", "1.0 / total_num_questions), (top_3 * 1.0 / total_num_questions), (top_5 * 1.0 / total_num_questions)))", "word_dict = args.word_dict train_loader = make_data_loader(args, all_train_exs, train_time=False) if args.eval_only else make_data_loader(args, all_train_exs,", "OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"dev/\") else: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"test/\")", "ret.model.load_state_dict(model['state_dict']['para_clf']) optimizer = None parameters = ret.get_trainable_params() if args.optimizer == 'sgd': optimizer =", "sys import logging import shutil from tqdm import tqdm from torch.autograd import Variable", "train parameters = ret.get_trainable_params() optimizer = None if parameters is not None and", "= ret_model.score_paras(*ret_input) scores = F.sigmoid(scores) y_num_occurrences = Variable(ex[-2]) labels = (y_num_occurrences > 0).float()", "/ total_num_questions), (top_5 * 1.0 / total_num_questions))) ## saving code if args.save_para_clf_output: if", "all_correct_ans = {} cum_num_len = 0 for question_i, qid in enumerate(corpus.questions): labels =", "is None: raise BrokenPipeError inputs = [e if e is None or type(e)", "1 break counter += num_paras[q_counter] logger.info('Accuracy of para classifier when 
evaluated on the", "top1 = eval_binary_classification(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None) if stats['best_acc'] < top1: stats['best_acc'] =", "is None: continue inputs = [e if e is None or type(e) !=", "word_dict, feature_dict def init_from_scratch(args, train_exs): logger.info('Initializing model from scratch') word_dict = feature_dict =", "ans in sorted_para_scores[:5]]) > 0: top5 += 1 logger.info( 'top1 = {}, top3", "25 == 0 and idx > 0: logger.info('Epoch = {} | iter={}/{} |", "optimizer.step() para_loss.update(batch_para_loss.data.item()) if math.isnan(para_loss.avg): import pdb pdb.set_trace() if idx % 25 == 0", "code if args.save_para_clf_output: if eval_on_train_set: OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/train/\" else: OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/dev/\" if", "{}, top3 = {}, top5 = {} '.format(top1, top3 ,top5 )) return top1", "args.optimizer == 'sgd': optimizer = optim.SGD(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) elif args.optimizer == 'adamax':", "main(args): # PRINT CONFIG logger.info('-' * 100) logger.info('CONFIG:\\n%s' % json.dumps(vars(args), indent=4, sort_keys=True)) #", "args.model_file, epoch=stats['epoch']) if __name__ == '__main__': # MODEL logger.info('-' * 100) # Parse", "!= type(ex[0]) else Variable(e.cuda(async=True)) for e in ex[:]] ret_input = [*inputs[:4]] scores, _,", "= args.embedding_dim args.word_dict = word_dict args.feature_dict = feature_dict ret = LSTMRetriever(args, word_dict, feature_dict)", "args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) elif args.optimizer == 'adamax': optimizer = optim.Adamax(parameters, weight_decay=args.weight_decay) elif args.optimizer", "= args.word_dict train_loader = make_data_loader(args, all_train_exs, train_time=False) if args.eval_only else make_data_loader(args, all_train_exs, train_time=True)", "# Main. 
# ------------------------------------------------------------------------------ def main(args): # PRINT CONFIG logger.info('-' * 100) logger.info('CONFIG:\\n%s'", "corpus, train=False, test=False): all_question_vectors = [] all_para_vectors = [] qid2idx = {} cum_num_lens", "optim.SGD(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) elif args.optimizer == 'adamax': optimizer = optim.Adamax(parameters, weight_decay=args.weight_decay) elif", "get_topk_tfidf(all_dev_exs) for epoch in range(args.num_epochs): stats['epoch'] = epoch train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None)", "dataset, batch_size=args.batch_size, sampler=sampler, num_workers=args.data_workers, collate_fn=vector.batchify(args, args.para_mode, train_time=train_time), pin_memory=True ) return loader def init_from_checkpoint(args):", "init_from_scratch(args, all_train_exs) else: ret_model, optimizer, word_dict, feature_dict = init_from_checkpoint(args) # make data loader", "type(ex[0]) else Variable(e.cuda(async=True)) for e in ex[:]] ret_input = [*inputs[:4]] scores, _, _", "1 top1 = top1/len(corpus.questions) top3 = top3/len(corpus.questions) top5 = top5/len(corpus.questions) logger.info('top1 = {},", "-------------------------------------------------------------------------- # TRAIN/VALID LOOP # -------------------------------------------------------------------------- # train parameters = ret.get_trainable_params() optimizer =", "import os import pickle import sys import logging import shutil from tqdm import", "pid in ex[-1]]): if qid not in question_vectors: question_vectors[qid] = ques[i] for i,", "sorted_para_scores[:5]]) > 0: top5 += 1 logger.info( 'top1 = {}, top3 = {},", "for ex_counter, ex in tqdm(enumerate(data_loader)): ret_input = [*ex] y_num_occurrences = ex[3] labels =", "idx, ex in enumerate(train_loader): if ex is None: continue inputs = [e if", "pickle.load(fin) fin.close() if args.test == 1: fin 
= open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.test_file_name),", "key=lambda x: x[0], reverse=True) if sorted_para_scores[0][1] > 0: top1 += 1 if sum([ans[1]", "0.5 a = scores == labels accuracy += a.sum() logger.info('Eval accuracy = {}", "if word_dict == None: args.word_dict = utils.build_word_dict(args, (all_train_exs, all_dev_exs)) word_dict = args.word_dict train_loader", "# train parameters = ret.get_trainable_params() optimizer = None if parameters is not None", "None args.feature_dict = None params['config'] = vars(args) if epoch: params['epoch'] = epoch try:", "total_exs += ex[0].size(0) scores, doc, ques = ret_model.score_paras(*ret_input) scores = scores.cpu().data.numpy() scores =", "0, 'best_verified_acc': 0} def make_data_loader(args, corpus, train_time=False): dataset = data.MultiCorpusDataset( args, corpus, args.word_dict,", "pid in corpus.questions[qid].pids] sorted_para_scores = sorted(para_scores, key=lambda x: x[0], reverse=True) if sorted_para_scores[0][1] >", "total_num_questions += 1 for i, no_paras in enumerate(ranked_para_ids): if labels[counter + no_paras ]", "in enumerate(corpus.questions[qid].pids): if corpus.paragraphs[pid].ans_occurance > 0: labels.append(para_i) all_para_vectors.append(para_vectors[pid]) all_correct_ans[qid] = labels all_para_vectors =", "top5 = 0 for qid in corpus.questions: para_scores = [(corpus.paragraphs[pid].model_score,corpus.paragraphs[pid].ans_occurance ) for pid", "= eval_binary_classification(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None) if stats['best_acc'] < top1: stats['best_acc'] = top1", "else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.mkdir(OUT_DIR) #Test cases assert cum_num_lens[-1] == documents.shape[0] assert questions.shape[0] ==", "def init_from_checkpoint(args): logger.info('Loading model from saved checkpoint {}'.format(args.pretrained)) model = torch.load(args.pretrained) word_dict =", "{} question_vectors = {} for idx, ex in 
enumerate(tqdm(data_loader)): if ex is None:", "train: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"train/\") else: if args.is_test == 0: OUT_DIR", "word_dict = model['word_dict'] feature_dict = model['feature_dict'] args.vocab_size = len(word_dict) args.embedding_dim_orig = args.embedding_dim args.word_dict", "ret = LSTMRetriever(args, word_dict, feature_dict) # -------------------------------------------------------------------------- # TRAIN/VALID LOOP # -------------------------------------------------------------------------- #", "and idx > 0: logger.info('Epoch = {} | iter={}/{} | para loss =", "= 0 args.train_time = False ret_model.model.eval() para_vectors = {} question_vectors = {} for", "logger.info(\"Evaluating on the full dev set....\") top1 = eval_binary_classification(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None)", "total_num_questions), (top_5 * 1.0 / total_num_questions) def save(args, model, optimizer, filename, epoch=None): params", "import utils, data, vector from model.retriever import LSTMRetriever from multi_corpus import MultiCorpus from", "import sys import logging import shutil from tqdm import tqdm from torch.autograd import", "== 1: args.test_file_name = args.test_file_name + \".pkl\" logger.info(\"Loading pickle files\") fin = open(os.path.join(args.data_dir,", "torch.autograd import Variable import torch.optim as optim import torch.nn.functional as F from torch.utils.data.sampler", "args.train_file_name = args.train_file_name + \".pkl\" args.dev_file_name = args.dev_file_name + \".pkl\" if args.test ==", "in enumerate(ex[-1]): corpus.paragraphs[pid].model_score = scores[i] get_topk(corpus) print_vectors(args, para_vectors, question_vectors, corpus, train, test) def", "0: top3 += 1 if sum([ans[1] for ans in sorted_para_scores[:5]]) > 0: top5", "optim.Adamax(parameters, weight_decay=args.weight_decay) elif args.optimizer == 'nag': optimizer = NAG(parameters, args.learning_rate, 
momentum=args.momentum, weight_decay=args.weight_decay) else:", "== documents.shape[0] assert len(cum_num_lens) == len(qid2idx) assert len(cum_num_lens) == len(all_correct_answers) json.dump(qid2idx, open(OUT_DIR +", "for pid in corpus.questions[qid].pids] sorted_para_scores = sorted(para_scores, key=lambda x: x[0]) # import pdb", "np.concatenate([documents, docs]) questions = np.concatenate([questions, ques]) ### create map and cum_num_lens for i,", "all_dev_exs = pickle.load(fin) fin.close() if args.test == 1: fin = open(os.path.join(args.data_dir, args.src, \"data\",", "torch.cuda.is_available() if args.cuda: torch.cuda.set_device(args.gpu) # Set random state np.random.seed(args.random_seed) torch.manual_seed(args.random_seed) if args.cuda: torch.cuda.manual_seed(args.random_seed)", "= None args.feature_dict = None params['config'] = vars(args) if epoch: params['epoch'] = epoch", "loss batch_para_loss = F.binary_cross_entropy_with_logits(scores.squeeze(1), labels) optimizer.zero_grad() batch_para_loss.backward() torch.nn.utils.clip_grad_norm(ret_model.get_trainable_params(), 2.0) optimizer.step() para_loss.update(batch_para_loss.data.item()) if math.isnan(para_loss.avg):", "pickle files\") fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.train_file_name), \"rb\") all_train_exs = pickle.load(fin)", "logger.info('-' * 100) # Parse cmdline args and setup environment args = config.get_args()", "eval_on_train_set: OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/train/\" else: OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/dev/\" if not os.path.exists(OUT_DIR): os.mkdir(OUT_DIR) else:", "model.state_dict(), 'optimizer': optimizer.state_dict() }, 'word_dict': args.word_dict, 'feature_dict': args.feature_dict } args.word_dict = None args.feature_dict", "accuracy = {} '.format(accuracy/total_exs)) top1 = get_topk(corpus) return top1 def print_vectors(args, para_vectors, question_vectors,", "logger.info('Epoch = {} | iter={}/{} | para loss = 
{:2.4f}'.format( stats['epoch'], idx, len(train_loader),", "else: raise RuntimeError('Unsupported optimizer: %s' % args.optimizer) else: pass return ret, optimizer, word_dict,", "logger.info(\"Saving dev paragraph vectors\") save_vectors(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None) logger.info(\"Saving train paragraph vectors\")", "params['word_dict'] args.feature_dict = params['feature_dict'] except BaseException: logger.warn('[ WARN: Saving failed... continuing anyway. ]')", "0).float() labels = labels.cuda() # BCE logits loss batch_para_loss = F.binary_cross_entropy_with_logits(scores.squeeze(1), labels) optimizer.zero_grad()", "= args.train_file_name + \".pkl\" args.dev_file_name = args.dev_file_name + \".pkl\" if args.test == 1:", "optimizer = None parameters = ret.get_trainable_params() if args.optimizer == 'sgd': optimizer = optim.SGD(parameters,", "args.test_file_name = args.test_file_name + \"_small\" args.train_file_name = args.train_file_name + \".pkl\" args.dev_file_name = args.dev_file_name", "word_dict, feature_dict) # load saved param values ret.model.load_state_dict(model['state_dict']['para_clf']) optimizer = None parameters =", "ret_model.model.train() for idx, ex in enumerate(train_loader): if ex is None: continue inputs =", "args.dev_file_name = args.dev_file_name + \"_small\" if args.test == 1: args.test_file_name = args.test_file_name +", "test) def get_topk(corpus): top1 = 0 top3 = 0 top5 = 0 for", "+= num_paras[i] cum_num_lens.append(sum_num_paras) all_correct_answers[map_counter] = [] st = sum(num_paras[:i]) for j in range(num_paras[i]):", "args.domain, \"train/\") else: if args.is_test == 0: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"dev/\")", "os.mkdir(OUT_DIR) #Test cases assert cum_num_lens[-1] == documents.shape[0] assert questions.shape[0] == documents.shape[0] assert len(cum_num_lens)", "if ex is None: raise BrokenPipeError inputs = [e if e is None", "'w')) all_cumlen = 
np.array(cum_num_lens) np.save(OUT_DIR + \"document\", documents) np.save(OUT_DIR + \"question\", questions) np.save(OUT_DIR", "{} '.format(top1, top3 ,top5 )) return top1 def get_topk_tfidf(corpus): top1 = 0 top3", "model at {}'.format(args.model_file)) ## check pointing## save(args, ret_model.model, optimizer, args.model_file+\".ckpt\", epoch=stats['epoch']) logger.info(\"Evaluating on", "None: args.vocab_size = len(word_dict) args.embedding_dim_orig = args.embedding_dim args.word_dict = word_dict args.feature_dict = feature_dict", "i, pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score = scores[i] get_topk(corpus) print_vectors(args, para_vectors, question_vectors, corpus, train,", "= {} '.format(top1 / len(corpus.questions), top3 / len(corpus.questions), top5 / len(corpus.questions))) def run_predictions(args,", "total_exs += ex[0].size(0) scores, _, _ = ret_model.score_paras(*ret_input) scores = F.sigmoid(scores) y_num_occurrences =", "{}'.format(args.model_file)) logger.info(\"Logs saved at {}\".format(args.log_file)) save(args, ret_model.model, optimizer, args.model_file, epoch=stats['epoch']) if __name__ ==", "0: logger.info('Epoch = {} | iter={}/{} | para loss = {:2.4f}'.format( stats['epoch'], idx,", "-------------------------------------------------------------------------- # train parameters = ret.get_trainable_params() optimizer = None if parameters is not", "args.src, args.domain, \"dev/\") else: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"test/\") logger.info(\"Printing vectors at", "in corpus.questions[qid].pids] sorted_para_scores = sorted(para_scores, key=lambda x: x[0], reverse=True) if sorted_para_scores[0][1] > 0:", "sum([ans[1] for ans in sorted_para_scores[:5]]) > 0: top5 += 1 top1 = top1/len(corpus.questions)", "= LSTMRetriever(args, word_dict, feature_dict) # -------------------------------------------------------------------------- # TRAIN/VALID LOOP # 
-------------------------------------------------------------------------- # train", "logging logger.setLevel(logging.INFO) fmt = logging.Formatter('%(asctime)s: %(message)s', '%m/%d/%Y %I:%M:%S %p') console = logging.StreamHandler() console.setFormatter(fmt)", "# small can't test if args.small == 1: args.test = 0 if args.small", "para_loss.reset() def eval_binary_classification(args, ret_model, corpus, dev_loader, verified_dev_loader=None, save_scores = True): total_exs = 0", "args.cuda: torch.cuda.manual_seed(args.random_seed) # Set logging logger.setLevel(logging.INFO) fmt = logging.Formatter('%(asctime)s: %(message)s', '%m/%d/%Y %I:%M:%S %p')", "is None: ret_model, optimizer, word_dict, feature_dict = init_from_scratch(args, all_train_exs) else: ret_model, optimizer, word_dict,", "feature_dict) # -------------------------------------------------------------------------- # TRAIN/VALID LOOP # -------------------------------------------------------------------------- # train parameters = ret.get_trainable_params()", "0: top5 += 1 logger.info( 'top1 = {}, top3 = {}, top5 =", "dataset = data.MultiCorpusDataset( args, corpus, args.word_dict, args.feature_dict, single_answer=False, para_mode=args.para_mode, train_time=train_time ) sampler =", "= [] all_question_vectors.append(question_vectors[qid]) qid2idx[qid] = question_i cum_num_len += len(corpus.questions[qid].pids) cum_num_lens.append(cum_num_len) for para_i, pid", "(all_train_exs, all_dev_exs)) word_dict = args.word_dict train_loader = make_data_loader(args, all_train_exs, train_time=False) if args.eval_only else", "accuracy += a.sum() logger.info('Eval accuracy = {} '.format(accuracy/total_exs)) top1 = get_topk(corpus) return top1", "feature_dict) # load saved param values ret.model.load_state_dict(model['state_dict']['para_clf']) optimizer = None parameters = ret.get_trainable_params()", "+= 1 break counter += num_paras[q_counter] logger.info('Accuracy of para classifier when evaluated on", "# pdb.set_trace() 
if sorted_para_scores[0][1] > 0: top1 += 1 if sum([ans[1] for ans", "corpus.questions: para_scores = [(corpus.paragraphs[pid].tfidf_score, corpus.paragraphs[pid].ans_occurance) for pid in corpus.questions[qid].pids] sorted_para_scores = sorted(para_scores, key=lambda", "# import pdb # pdb.set_trace() if sorted_para_scores[0][1] > 0: top1 += 1 if", "import json import os import pickle import sys import logging import shutil from", "LOOP # -------------------------------------------------------------------------- # train parameters = ret.get_trainable_params() optimizer = None if parameters", "small can't test if args.small == 1: args.test = 0 if args.small ==", "all_dev_exs) if args.test: test_loader = make_data_loader(args, all_test_exs) if args.eval_only: logger.info(\"Saving dev paragraph vectors\")", "args, corpus, args.word_dict, args.feature_dict, single_answer=False, para_mode=args.para_mode, train_time=train_time ) sampler = SequentialSampler(dataset) if not", "ex in enumerate(tqdm(data_loader)): if ex is None: raise BrokenPipeError inputs = [e if", "True, train=False, test=False): total_exs = 0 args.train_time = False ret_model.model.eval() para_vectors = {}", "{} cum_num_lens = [] all_correct_ans = {} cum_num_len = 0 for question_i, qid", "= 0 top3 = 0 top5 = 0 for qid in corpus.questions: para_scores", "assert len(cum_num_lens) == len(all_correct_answers) json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w')) json.dump(all_correct_answers, open(OUT_DIR + 'correct_paras.json',", "total_num_questions), (top_3 * 1.0 / total_num_questions), (top_5 * 1.0 / total_num_questions) def save(args,", "NAG(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) else: raise RuntimeError('Unsupported optimizer: %s' % args.optimizer) optimizer.load_state_dict(model['state_dict']['optimizer']) logger.info('Model", "2: top_3 += 1 if i <= 0: top_1 += 1 break counter", "scores = scores.cpu().data.numpy() scores = scores.reshape((-1)) if 
save_scores: for i, pid in enumerate(ex[-1]):", "= True, train=False, test=False): total_exs = 0 args.train_time = False ret_model.model.eval() para_vectors =", "top1 def get_topk_tfidf(corpus): top1 = 0 top3 = 0 top5 = 0 for", "sorted_para_scores[:3]]) > 0: top3 += 1 if sum([ans[1] for ans in sorted_para_scores[:5]]) >", "0.0 for idx, ex in enumerate(tqdm(dev_loader)): if ex is None: raise BrokenPipeError inputs", "environment args = config.get_args() # Set cuda args.cuda = not args.no_cuda and torch.cuda.is_available()", "dev_loader, verified_dev_loader=None, save_scores = True): total_exs = 0 args.train_time = False ret_model.model.eval() accuracy", "/ total_num_questions), (top_3 * 1.0 / total_num_questions), (top_5 * 1.0 / total_num_questions))) ##", "top_3 = 0 top_5 = 0 total_num_questions = 0 map_counter = 0 cum_num_lens", "make data loader logger.info(\"Making data loaders...\") if word_dict == None: args.word_dict = utils.build_word_dict(args,", "import MultiCorpus from torch.utils.data.sampler import SequentialSampler, RandomSampler import math logger = logging.getLogger() global_timer", "top3 = {}, top5 = {} '.format(top1, top3 ,top5 )) return top1 def", "'.format(accuracy/total_exs)) top1 = get_topk(corpus) return top1 def print_vectors(args, para_vectors, question_vectors, corpus, train=False, test=False):", "1: args.test = 0 if args.small == 1: args.train_file_name = args.train_file_name + \"_small\"", "st + num_paras[i]]) map_counter += 1 counter = 0 for q_counter, ranked_para_ids in", "args.test == 1: args.test_file_name = args.test_file_name + \"_small\" args.train_file_name = args.train_file_name + \".pkl\"", "1.0 / total_num_questions) def save(args, model, optimizer, filename, epoch=None): params = { 'state_dict':", "cases assert cum_num_lens[-1] == documents.shape[0] assert questions.shape[0] == documents.shape[0] assert len(cum_num_lens) == len(qid2idx)", "= {} cum_num_len = 0 for question_i, qid in enumerate(corpus.questions): labels = []", 
"= ques else: documents = np.concatenate([documents, docs]) questions = np.concatenate([questions, ques]) ### create", "labels.data.numpy() scores = scores.cpu().data.numpy() scores = scores.reshape((-1)) if save_scores: for i, pid in", "para_scores = [(corpus.paragraphs[pid].model_score,corpus.paragraphs[pid].ans_occurance ) for pid in corpus.questions[qid].pids] sorted_para_scores = sorted(para_scores, key=lambda x:", "in enumerate([corpus.paragraphs[pid].qid for pid in ex[-1]]): if qid not in question_vectors: question_vectors[qid] =", "top1/len(corpus.questions) top3 = top3/len(corpus.questions) top5 = top5/len(corpus.questions) logger.info('top1 = {}, top3 = {},", "from scratch') word_dict = feature_dict = None # create or get vocab word_dict", "enumerate(ex[-1]): corpus.paragraphs[pid].model_score = scores[i] get_topk(corpus) print_vectors(args, para_vectors, question_vectors, corpus, train, test) def get_topk(corpus):", "x: x[0], reverse=True) if sorted_para_scores[0][1] > 0: top1 += 1 if sum([ans[1] for", "logger.info(\"Num dev examples {}\".format(len(all_dev_exs.paragraphs))) if args.test == 1: logger.info(\"Num test examples {}\".format(len(all_test_exs.paragraphs))) if", "0 for qid in corpus.questions: para_scores = [(corpus.paragraphs[pid].model_score,corpus.paragraphs[pid].ans_occurance ) for pid in corpus.questions[qid].pids]", "get_topk_tfidf(corpus): top1 = 0 top3 = 0 top5 = 0 for qid in", "eval_binary_classification(args, ret_model, corpus, dev_loader, verified_dev_loader=None, save_scores = True): total_exs = 0 args.train_time =", "ex is None: continue inputs = [e if e is None or type(e)", "= utils.build_word_dict(args, train_exs) if word_dict is not None: args.vocab_size = len(word_dict) args.embedding_dim_orig =", "args.optimizer) optimizer.load_state_dict(model['state_dict']['optimizer']) logger.info('Model loaded...') return ret, optimizer, word_dict, feature_dict def init_from_scratch(args, train_exs): logger.info('Initializing", "hack for 
not saving dictionary twice args.word_dict = params['word_dict'] args.feature_dict = params['feature_dict'] except", "no_paras ] ==1: if i <= 4: top_5 += 1 if i <=", "PRINT CONFIG logger.info('-' * 100) logger.info('CONFIG:\\n%s' % json.dumps(vars(args), indent=4, sort_keys=True)) # small can't", "+ \".pkl\" args.dev_file_name = args.dev_file_name + \".pkl\" if args.test == 1: args.test_file_name =", "q_counter, ranked_para_ids in enumerate(topk_paras): total_num_questions += 1 for i, no_paras in enumerate(ranked_para_ids): if", "save_scores = True, train=False, test=False): total_exs = 0 args.train_time = False ret_model.model.eval() para_vectors", "return top1 def get_topk_tfidf(corpus): top1 = 0 top3 = 0 top5 = 0", "ret_model.model.eval() accuracy = 0.0 for idx, ex in enumerate(tqdm(dev_loader)): if ex is None:", "len(corpus.questions), top5 / len(corpus.questions))) def run_predictions(args, data_loader, model, eval_on_train_set=False): args.train_time = False top_1", "num_paras[q_counter] logger.info('Accuracy of para classifier when evaluated on the annotated dev set.') logger.info('top-1:", "0 top5 = 0 for qid in corpus.questions: para_scores = [(corpus.paragraphs[pid].tfidf_score, corpus.paragraphs[pid].ans_occurance) for", "len(qid2idx) assert all_question_vectors.shape[0] == len(all_correct_ans) ## saving code if train: OUT_DIR = os.path.join(args.save_dir,", "accuracy = 0.0 for idx, ex in enumerate(tqdm(dev_loader)): if ex is None: raise", "return top1 def print_vectors(args, para_vectors, question_vectors, corpus, train=False, test=False): all_question_vectors = [] all_para_vectors", "questions) np.save(OUT_DIR + \"all_cumlen\", all_cumlen) return (top_1 * 1.0 / total_num_questions), (top_3 *", "stats = {'timer': global_timer, 'epoch': 0, 'best_valid': 0, 'best_verified_valid': 0, 'best_acc': 0, 'best_verified_acc':", "None: raise BrokenPipeError inputs = [e if e is None or type(e) !=", "= os.path.join(args.save_dir, args.src, args.domain, \"dev/\") 
else: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"test/\") logger.info(\"Printing", "Test case: assert len(all_correct_answers[map_counter]) == sum(labels.data.numpy()[st: st + num_paras[i]]) map_counter += 1 counter", "+ 'correct_paras.json', 'w')) all_cumlen = np.array(cum_num_lens) np.save(OUT_DIR + \"document\", all_para_vectors) np.save(OUT_DIR + \"question\",", "= 0 top_5 = 0 total_num_questions = 0 map_counter = 0 cum_num_lens =", "+= 1 if i <= 0: top_1 += 1 break counter += num_paras[q_counter]", "checkpoint {}'.format(args.pretrained)) model = torch.load(args.pretrained) word_dict = model['word_dict'] feature_dict = model['feature_dict'] args.vocab_size =", "ret_model, all_dev_exs, dev_loader, verified_dev_loader=None) logger.info(\"Saving train paragraph vectors\") save_vectors(args, ret_model, all_train_exs, train_loader, verified_dev_loader=None,", "def print_vectors(args, para_vectors, question_vectors, corpus, train=False, test=False): all_question_vectors = [] all_para_vectors = []", "for idx, ex in enumerate(train_loader): if ex is None: continue inputs = [e", "from torch.utils.data.sampler import RandomSampler import config from model import utils, data, vector from", "verified_dev_loader=None) if stats['best_acc'] < top1: stats['best_acc'] = top1 logger.info('Best accuracy {}'.format(stats['best_acc'])) logger.info('Saving model", "'para_clf': model.state_dict(), 'optimizer': optimizer.state_dict() }, 'word_dict': args.word_dict, 'feature_dict': args.feature_dict } args.word_dict = None", "else: ret_model, optimizer, word_dict, feature_dict = init_from_checkpoint(args) # make data loader logger.info(\"Making data", "args.test: args.is_test = 1 logger.info(\"Saving test paragraph vectors\") save_vectors(args, ret_model, all_test_exs, test_loader, verified_dev_loader=None)", "logger.info(\"Saving test paragraph vectors\") save_vectors(args, ret_model, all_test_exs, test_loader, verified_dev_loader=None) else: 
get_topk_tfidf(all_dev_exs) for epoch", "OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"train/\") else: if args.is_test == 0: OUT_DIR =", "corpus, args.word_dict, args.feature_dict, single_answer=False, para_mode=args.para_mode, train_time=train_time ) sampler = SequentialSampler(dataset) if not train_time", "= ques.cpu().data.numpy() if ex_counter == 0: documents = docs questions = ques else:", "in enumerate(ex[-1]): corpus.paragraphs[pid].model_score = scores[i] scores = scores > 0.5 a = scores", "all_cumlen) return (top_1 * 1.0 / total_num_questions), (top_3 * 1.0 / total_num_questions), (top_5", "\".pkl\" if args.test == 1: args.test_file_name = args.test_file_name + \".pkl\" logger.info(\"Loading pickle files\")", "== 1: args.train_file_name = args.train_file_name + \"_small\" args.dev_file_name = args.dev_file_name + \"_small\" if", "= np.concatenate([documents, docs]) questions = np.concatenate([questions, ques]) ### create map and cum_num_lens for", "ex[:]] ret_input = [*inputs[:4]] scores, _, _ = ret_model.score_paras(*ret_input) y_num_occurrences = Variable(ex[-2]) labels", "labels) optimizer.zero_grad() batch_para_loss.backward() torch.nn.utils.clip_grad_norm(ret_model.get_trainable_params(), 2.0) optimizer.step() para_loss.update(batch_para_loss.data.item()) if math.isnan(para_loss.avg): import pdb pdb.set_trace() if", "torch.utils.data.sampler import SequentialSampler, RandomSampler import math logger = logging.getLogger() global_timer = utils.Timer() stats", "def init_from_scratch(args, train_exs): logger.info('Initializing model from scratch') word_dict = feature_dict = None #", "else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.makedirs(OUT_DIR) json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w')) json.dump(all_correct_ans, open(OUT_DIR + 'correct_paras.json',", "not args.no_cuda and torch.cuda.is_available() if args.cuda: torch.cuda.set_device(args.gpu) # Set random state np.random.seed(args.random_seed) 
torch.manual_seed(args.random_seed)", "np.save(OUT_DIR + \"document\", documents) np.save(OUT_DIR + \"question\", questions) np.save(OUT_DIR + \"all_cumlen\", all_cumlen) return", "args.dev_file_name = args.dev_file_name + \".pkl\" if args.test == 1: args.test_file_name = args.test_file_name +", "OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/dev/\" if not os.path.exists(OUT_DIR): os.mkdir(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.mkdir(OUT_DIR) #Test cases", "train_time=train_time), pin_memory=True ) return loader def init_from_checkpoint(args): logger.info('Loading model from saved checkpoint {}'.format(args.pretrained))", "= [e if e is None or type(e) != type(ex[0]) else Variable(e.cuda(async=True)) for", "word_dict = utils.build_word_dict(args, train_exs) if word_dict is not None: args.vocab_size = len(word_dict) args.embedding_dim_orig", "dictionary twice args.word_dict = params['word_dict'] args.feature_dict = params['feature_dict'] except BaseException: logger.warn('[ WARN: Saving", "for question_i, qid in enumerate(corpus.questions): labels = [] all_question_vectors.append(question_vectors[qid]) qid2idx[qid] = question_i cum_num_len", "4: top_5 += 1 if i <= 2: top_3 += 1 if i", "done!\") logger.info(\"Num train examples {}\".format(len(all_train_exs.paragraphs))) logger.info(\"Num dev examples {}\".format(len(all_dev_exs.paragraphs))) if args.test == 1:", "= args.dev_file_name + \".pkl\" if args.test == 1: args.test_file_name = args.test_file_name + \".pkl\"", "torch.cuda.set_device(args.gpu) # Set random state np.random.seed(args.random_seed) torch.manual_seed(args.random_seed) if args.cuda: torch.cuda.manual_seed(args.random_seed) # Set logging", "= args.train_file_name + \"_small\" args.dev_file_name = args.dev_file_name + \"_small\" if args.test == 1:", "qid in enumerate([corpus.paragraphs[pid].qid for pid in ex[-1]]): if qid not in question_vectors: question_vectors[qid]", "True para_loss = utils.AverageMeter() 
ret_model.model.train() for idx, ex in enumerate(train_loader): if ex is", "args.word_dict, 'feature_dict': args.feature_dict } args.word_dict = None args.feature_dict = None params['config'] = vars(args)", "100) # Parse cmdline args and setup environment args = config.get_args() # Set", "cum_num_len += len(corpus.questions[qid].pids) cum_num_lens.append(cum_num_len) for para_i, pid in enumerate(corpus.questions[qid].pids): if corpus.paragraphs[pid].ans_occurance > 0:", "= sorted(para_scores, key=lambda x: x[0], reverse=True) if sorted_para_scores[0][1] > 0: top1 += 1", "[*ex] y_num_occurrences = ex[3] labels = (y_num_occurrences > 0) try: topk_paras, docs, ques", "if not os.path.exists(OUT_DIR): os.makedirs(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.makedirs(OUT_DIR) json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w'))", "top5 += 1 top1 = top1/len(corpus.questions) top3 = top3/len(corpus.questions) top5 = top5/len(corpus.questions) logger.info('top1", "= logging.StreamHandler() console.setFormatter(fmt) logger.addHandler(console) if args.log_file: if args.checkpoint: logfile = logging.FileHandler(args.log_file, 'a') else:", "[e if e is None or type(e) != type(ex[0]) else Variable(e.cuda(async=True)) for e", "= 1 logger.info(\"Saving test paragraph vectors\") save_vectors(args, ret_model, all_test_exs, test_loader, verified_dev_loader=None) else: get_topk_tfidf(all_dev_exs)", ") for pid in corpus.questions[qid].pids] sorted_para_scores = sorted(para_scores, key=lambda x: x[0], reverse=True) if", "torch.nn.utils.clip_grad_norm(ret_model.get_trainable_params(), 2.0) optimizer.step() para_loss.update(batch_para_loss.data.item()) if math.isnan(para_loss.avg): import pdb pdb.set_trace() if idx % 25", "if stats['best_acc'] < top1: stats['best_acc'] = top1 logger.info('Best accuracy {}'.format(stats['best_acc'])) logger.info('Saving model at", "%(message)s', '%m/%d/%Y %I:%M:%S %p') console = logging.StreamHandler() console.setFormatter(fmt) 
logger.addHandler(console) if args.log_file: if args.checkpoint:", "model['word_dict'] feature_dict = model['feature_dict'] args.vocab_size = len(word_dict) args.embedding_dim_orig = args.embedding_dim args.word_dict = word_dict", "train=True) if args.test: args.is_test = 1 logger.info(\"Saving test paragraph vectors\") save_vectors(args, ret_model, all_test_exs,", "if parameters is not None and len(parameters) > 0: if args.optimizer == 'sgd':", "= 0 args.train_time = False ret_model.model.eval() accuracy = 0.0 for idx, ex in", "ques]) ### create map and cum_num_lens for i, qid in enumerate(qids): qid2idx[qid] =", "train_loader, verified_dev_loader=None, train=True) if args.test: args.is_test = 1 logger.info(\"Saving test paragraph vectors\") save_vectors(args,", "= model['feature_dict'] args.vocab_size = len(word_dict) args.embedding_dim_orig = args.embedding_dim args.word_dict = word_dict args.feature_dict =", "= (y_num_occurrences > 0) try: topk_paras, docs, ques = model.return_topk(5,*ret_input) except RuntimeError: import", "if ex is None: continue inputs = [e if e is None or", "scores == labels accuracy += a.sum() logger.info('Eval accuracy = {} '.format(accuracy/total_exs)) top1 =", "<= 4: top_5 += 1 if i <= 2: top_3 += 1 if", "a.sum() logger.info('Eval accuracy = {} '.format(accuracy/total_exs)) top1 = get_topk(corpus) return top1 def print_vectors(args,", "make_data_loader(args, corpus, train_time=False): dataset = data.MultiCorpusDataset( args, corpus, args.word_dict, args.feature_dict, single_answer=False, para_mode=args.para_mode, train_time=train_time", "+= ex[0].size(0) scores, _, _ = ret_model.score_paras(*ret_input) scores = F.sigmoid(scores) y_num_occurrences = Variable(ex[-2])", "Variable(ex[-2]) labels = (y_num_occurrences > 0).float() labels = labels.data.numpy() scores = scores.cpu().data.numpy() scores", "logger.info(\"Num train examples {}\".format(len(all_train_exs.paragraphs))) logger.info(\"Num dev examples 
{}\".format(len(all_dev_exs.paragraphs))) if args.test == 1: logger.info(\"Num", "# create or get vocab word_dict = utils.build_word_dict(args, train_exs) if word_dict is not", "in enumerate(topk_paras): total_num_questions += 1 for i, no_paras in enumerate(ranked_para_ids): if labels[counter +", "ex in tqdm(enumerate(data_loader)): ret_input = [*ex] y_num_occurrences = ex[3] labels = (y_num_occurrences >", "cmdline args and setup environment args = config.get_args() # Set cuda args.cuda =", "all_correct_answers[map_counter].append(j) ### Test case: assert len(all_correct_answers[map_counter]) == sum(labels.data.numpy()[st: st + num_paras[i]]) map_counter +=", "= False top_1 = 0 top_3 = 0 top_5 = 0 total_num_questions =", "= make_data_loader(args, all_dev_exs) if args.test: test_loader = make_data_loader(args, all_test_exs) if args.eval_only: logger.info(\"Saving dev", "ret_input = [*inputs[:4]] scores, _, _ = ret_model.score_paras(*ret_input) y_num_occurrences = Variable(ex[-2]) labels =", "== len(all_correct_ans) ## saving code if train: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"train/\")", "logfile = logging.FileHandler(args.log_file, 'w') logfile.setFormatter(fmt) logger.addHandler(logfile) logger.info('[ COMMAND: %s ]' % ' '.join(sys.argv))", "* 1.0 / total_num_questions))) ## saving code if args.save_para_clf_output: if eval_on_train_set: OUT_DIR =", "optimizer.zero_grad() batch_para_loss.backward() torch.nn.utils.clip_grad_norm(ret_model.get_trainable_params(), 2.0) optimizer.step() para_loss.update(batch_para_loss.data.item()) if math.isnan(para_loss.avg): import pdb pdb.set_trace() if idx", "all_question_vectors.shape[0] == len(all_correct_ans) ## saving code if train: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain,", "scores, _, _ = ret_model.score_paras(*ret_input) scores = F.sigmoid(scores) y_num_occurrences = Variable(ex[-2]) labels =", "for idx, ex in enumerate(tqdm(dev_loader)): if ex is None: raise BrokenPipeError 
inputs =", "/ total_num_questions) def save(args, model, optimizer, filename, epoch=None): params = { 'state_dict': {", "args.test == 1: logger.info(\"Num test examples {}\".format(len(all_test_exs.paragraphs))) if args.pretrained is None: ret_model, optimizer,", "MODEL logger.info('-' * 100) # Parse cmdline args and setup environment args =", "args.save_para_clf_output: if eval_on_train_set: OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/train/\" else: OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/dev/\" if not os.path.exists(OUT_DIR):", "= NAG(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) else: raise RuntimeError('Unsupported optimizer: %s' % args.optimizer) else:", "e in ex[:]] ret_input = [*inputs[:4]] scores, _, _ = ret_model.score_paras(*ret_input) y_num_occurrences =", "para classifier when evaluated on the annotated dev set.') logger.info('top-1: {:2.4f}, top-3: {:2.4f},", "docs = docs.cpu().data.numpy() ques = ques.cpu().data.numpy() if ex_counter == 0: documents = docs", "= feature_dict = None # create or get vocab word_dict = utils.build_word_dict(args, train_exs)", "0 total_num_questions = 0 map_counter = 0 cum_num_lens = [] qid2idx = {}", "for i, qid in enumerate(qids): qid2idx[qid] = map_counter sum_num_paras += num_paras[i] cum_num_lens.append(sum_num_paras) all_correct_answers[map_counter]", "create or get vocab word_dict = utils.build_word_dict(args, train_exs) if word_dict is not None:", "= NAG(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) else: raise RuntimeError('Unsupported optimizer: %s' % args.optimizer) optimizer.load_state_dict(model['state_dict']['optimizer'])", "if labels[counter + no_paras ] ==1: if i <= 4: top_5 += 1", "== 1: fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.test_file_name), \"rb\") all_test_exs = pickle.load(fin)", "args.word_dict = None args.feature_dict = None params['config'] = vars(args) if epoch: 
params['epoch'] =", "enumerate(train_loader): if ex is None: continue inputs = [e if e is None", "]') # ------------------------------------------------------------------------------ # Main. # ------------------------------------------------------------------------------ def main(args): # PRINT CONFIG logger.info('-'", "Set logging logger.setLevel(logging.INFO) fmt = logging.Formatter('%(asctime)s: %(message)s', '%m/%d/%Y %I:%M:%S %p') console = logging.StreamHandler()", "ex[3] labels = (y_num_occurrences > 0) try: topk_paras, docs, ques = model.return_topk(5,*ret_input) except", "batch_para_loss.backward() torch.nn.utils.clip_grad_norm(ret_model.get_trainable_params(), 2.0) optimizer.step() para_loss.update(batch_para_loss.data.item()) if math.isnan(para_loss.avg): import pdb pdb.set_trace() if idx %", "SequentialSampler, RandomSampler import math logger = logging.getLogger() global_timer = utils.Timer() stats = {'timer':", "if train: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"train/\") else: if args.is_test == 0:", "train_loader, verified_dev_loader=None): args.train_time = True para_loss = utils.AverageMeter() ret_model.model.train() for idx, ex in", "scratch') word_dict = feature_dict = None # create or get vocab word_dict =", "full dev set....\") top1 = eval_binary_classification(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None) if stats['best_acc'] <", ") return loader def init_from_checkpoint(args): logger.info('Loading model from saved checkpoint {}'.format(args.pretrained)) model =", "\".pkl\" args.dev_file_name = args.dev_file_name + \".pkl\" if args.test == 1: args.test_file_name = args.test_file_name", "len(all_correct_answers) json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w')) json.dump(all_correct_answers, open(OUT_DIR + 'correct_paras.json', 'w')) all_cumlen =", "Variable import torch.optim as optim import torch.nn.functional as F from torch.utils.data.sampler import RandomSampler", "anyway. 
]') # ------------------------------------------------------------------------------ # Main. # ------------------------------------------------------------------------------ def main(args): # PRINT CONFIG", "== 1: args.test_file_name = args.test_file_name + \"_small\" args.train_file_name = args.train_file_name + \".pkl\" args.dev_file_name", "assert all_question_vectors.shape[0] == len(cum_num_lens) assert all_question_vectors.shape[0] == len(qid2idx) assert all_question_vectors.shape[0] == len(all_correct_ans) ##", "vocab word_dict = utils.build_word_dict(args, train_exs) if word_dict is not None: args.vocab_size = len(word_dict)", "args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) else: raise RuntimeError('Unsupported optimizer: %s' % args.optimizer) else: pass return", "for qid in corpus.questions: para_scores = [(corpus.paragraphs[pid].model_score,corpus.paragraphs[pid].ans_occurance ) for pid in corpus.questions[qid].pids] sorted_para_scores", "'w')) all_cumlen = np.array(cum_num_lens) np.save(OUT_DIR + \"document\", all_para_vectors) np.save(OUT_DIR + \"question\", all_question_vectors) np.save(OUT_DIR", "scores > 0.5 a = scores == labels accuracy += a.sum() logger.info('Eval accuracy", "1 if sum([ans[1] for ans in sorted_para_scores[:5]]) > 0: top5 += 1 top1", "top-5: {:2.4f}'.format( (top_1 * 1.0 / total_num_questions), (top_3 * 1.0 / total_num_questions), (top_5", "= 0 if args.small == 1: args.train_file_name = args.train_file_name + \"_small\" args.dev_file_name =", "top_5 += 1 if i <= 2: top_3 += 1 if i <=", "args.embedding_dim_orig = args.embedding_dim args.word_dict = word_dict args.feature_dict = feature_dict ret = LSTMRetriever(args, word_dict,", "1: all_correct_answers[map_counter].append(j) ### Test case: assert len(all_correct_answers[map_counter]) == sum(labels.data.numpy()[st: st + num_paras[i]]) map_counter", "if corpus.paragraphs[pid].ans_occurance > 0: labels.append(para_i) all_para_vectors.append(para_vectors[pid]) 
all_correct_ans[qid] = labels all_para_vectors = np.stack(all_para_vectors) all_question_vectors", "= [*inputs[:4]] scores, _, _ = ret_model.score_paras(*ret_input) y_num_occurrences = Variable(ex[-2]) labels = (y_num_occurrences", "stats['epoch'], idx, len(train_loader), para_loss.avg)) para_loss.reset() def eval_binary_classification(args, ret_model, corpus, dev_loader, verified_dev_loader=None, save_scores =", "top1: stats['best_acc'] = top1 logger.info('Best accuracy {}'.format(stats['best_acc'])) logger.info('Saving model at {}'.format(args.model_file)) logger.info(\"Logs saved", "else: documents = np.concatenate([documents, docs]) questions = np.concatenate([questions, ques]) ### create map and", "+ \"_small\" args.train_file_name = args.train_file_name + \".pkl\" args.dev_file_name = args.dev_file_name + \".pkl\" if", "= np.concatenate([questions, ques]) ### create map and cum_num_lens for i, qid in enumerate(qids):", "top5 = top5/len(corpus.questions) logger.info('top1 = {}, top3 = {}, top5 = {} '.format(top1,", "= Variable(ex[-2]) labels = (y_num_occurrences > 0).float() labels = labels.data.numpy() scores = scores.cpu().data.numpy()", "len(all_correct_answers[map_counter]) == sum(labels.data.numpy()[st: st + num_paras[i]]) map_counter += 1 counter = 0 for", "epoch=stats['epoch']) if __name__ == '__main__': # MODEL logger.info('-' * 100) # Parse cmdline", "documents = np.concatenate([documents, docs]) questions = np.concatenate([questions, ques]) ### create map and cum_num_lens", "in sorted_para_scores[:5]]) > 0: top5 += 1 top1 = top1/len(corpus.questions) top3 = top3/len(corpus.questions)", "scores[i] scores = scores > 0.5 a = scores == labels accuracy +=", "top1 = get_topk(corpus) return top1 def print_vectors(args, para_vectors, question_vectors, corpus, train=False, test=False): all_question_vectors", "= [(corpus.paragraphs[pid].model_score,corpus.paragraphs[pid].ans_occurance ) for pid in corpus.questions[qid].pids] sorted_para_scores = 
sorted(para_scores, key=lambda x: x[0],", "torch.manual_seed(args.random_seed) if args.cuda: torch.cuda.manual_seed(args.random_seed) # Set logging logger.setLevel(logging.INFO) fmt = logging.Formatter('%(asctime)s: %(message)s', '%m/%d/%Y", "at {}\".format(OUT_DIR)) if not os.path.exists(OUT_DIR): os.makedirs(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.makedirs(OUT_DIR) json.dump(qid2idx, open(OUT_DIR +", "+ \"_small\" if args.test == 1: args.test_file_name = args.test_file_name + \"_small\" args.train_file_name =", "else Variable(e.cuda(async=True)) for e in ex[:]] ret_input = [*inputs[:4]] scores, _, _ =", "if args.is_test == 0: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"dev/\") else: OUT_DIR =", "torch.load(args.pretrained) word_dict = model['word_dict'] feature_dict = model['feature_dict'] args.vocab_size = len(word_dict) args.embedding_dim_orig = args.embedding_dim", "len(cum_num_lens) == len(qid2idx) assert len(cum_num_lens) == len(all_correct_answers) json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w')) json.dump(all_correct_answers,", "{}\".format(OUT_DIR)) if not os.path.exists(OUT_DIR): os.makedirs(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.makedirs(OUT_DIR) json.dump(qid2idx, open(OUT_DIR + 'map.json',", "= {} for idx, ex in enumerate(tqdm(data_loader)): if ex is None: raise BrokenPipeError", "json.dumps(vars(args), indent=4, sort_keys=True)) # small can't test if args.small == 1: args.test =", "ret, optimizer, word_dict, feature_dict def train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None): args.train_time = True", "= scores == labels accuracy += a.sum() logger.info('Eval accuracy = {} '.format(accuracy/total_exs)) top1", "= ret_model.score_paras(*ret_input) scores = scores.cpu().data.numpy() scores = scores.reshape((-1)) if save_scores: for i, pid", "args.is_test == 0: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"dev/\") else: OUT_DIR = 
os.path.join(args.save_dir,", "loaded...') return ret, optimizer, word_dict, feature_dict def init_from_scratch(args, train_exs): logger.info('Initializing model from scratch')", "= LSTMRetriever(args, word_dict, feature_dict) # load saved param values ret.model.load_state_dict(model['state_dict']['para_clf']) optimizer = None", "map_counter += 1 counter = 0 for q_counter, ranked_para_ids in enumerate(topk_paras): total_num_questions +=", "= open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.train_file_name), \"rb\") all_train_exs = pickle.load(fin) fin.close() fin =", "len(word_dict) args.embedding_dim_orig = args.embedding_dim args.word_dict = word_dict args.feature_dict = feature_dict ret = LSTMRetriever(args,", "== len(qid2idx) assert all_question_vectors.shape[0] == len(all_correct_ans) ## saving code if train: OUT_DIR =", "in corpus.questions[qid].pids] sorted_para_scores = sorted(para_scores, key=lambda x: x[0]) # import pdb # pdb.set_trace()", "scores.reshape((-1)) if save_scores: for i, pid in enumerate(ex[-1]): para_vectors[pid] = doc[i] for i,", "RuntimeError('Unsupported optimizer: %s' % args.optimizer) else: pass return ret, optimizer, word_dict, feature_dict def", "= labels.data.numpy() scores = scores.cpu().data.numpy() scores = scores.reshape((-1)) if save_scores: for i, pid", "para_loss.avg)) para_loss.reset() def eval_binary_classification(args, ret_model, corpus, dev_loader, verified_dev_loader=None, save_scores = True): total_exs =", "save_scores = True): total_exs = 0 args.train_time = False ret_model.model.eval() accuracy = 0.0", "top3 ,top5 )) return top1 def get_topk_tfidf(corpus): top1 = 0 top3 = 0", "range(num_paras[i]): if labels[st+j] == 1: all_correct_answers[map_counter].append(j) ### Test case: assert len(all_correct_answers[map_counter]) == sum(labels.data.numpy()[st:", "args.test: test_loader = make_data_loader(args, all_test_exs) if args.eval_only: logger.info(\"Saving dev paragraph vectors\") save_vectors(args, 
ret_model,", "total_num_questions), (top_5 * 1.0 / total_num_questions))) ## saving code if args.save_para_clf_output: if eval_on_train_set:", "para_loss.update(batch_para_loss.data.item()) if math.isnan(para_loss.avg): import pdb pdb.set_trace() if idx % 25 == 0 and", "pointing## save(args, ret_model.model, optimizer, args.model_file+\".ckpt\", epoch=stats['epoch']) logger.info(\"Evaluating on the full dev set....\") top1", "ex[1] qids = ex[-1] if args.save_para_clf_output: docs = docs.cpu().data.numpy() ques = ques.cpu().data.numpy() if", "== 1: all_correct_answers[map_counter].append(j) ### Test case: assert len(all_correct_answers[map_counter]) == sum(labels.data.numpy()[st: st + num_paras[i]])", "model['feature_dict'] args.vocab_size = len(word_dict) args.embedding_dim_orig = args.embedding_dim args.word_dict = word_dict args.feature_dict = feature_dict", "0 top_5 = 0 total_num_questions = 0 map_counter = 0 cum_num_lens = []", "= vars(args) if epoch: params['epoch'] = epoch try: torch.save(params, filename) # bad hack", "{} for ex_counter, ex in tqdm(enumerate(data_loader)): ret_input = [*ex] y_num_occurrences = ex[3] labels", "train_time=False): dataset = data.MultiCorpusDataset( args, corpus, args.word_dict, args.feature_dict, single_answer=False, para_mode=args.para_mode, train_time=train_time ) sampler", "console.setFormatter(fmt) logger.addHandler(console) if args.log_file: if args.checkpoint: logfile = logging.FileHandler(args.log_file, 'a') else: logfile =", "for ans in sorted_para_scores[:5]]) > 0: top5 += 1 top1 = top1/len(corpus.questions) top3", "args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) else: raise RuntimeError('Unsupported optimizer: %s' % args.optimizer) optimizer.load_state_dict(model['state_dict']['optimizer']) logger.info('Model loaded...')", "elif args.optimizer == 'adamax': optimizer = optim.Adamax(parameters, weight_decay=args.weight_decay) elif args.optimizer == 'nag': optimizer", "import numpy as np 
import json import os import pickle import sys import", "'map.json', 'w')) json.dump(all_correct_ans, open(OUT_DIR + 'correct_paras.json', 'w')) all_cumlen = np.array(cum_num_lens) np.save(OUT_DIR + \"document\",", "assert all_para_vectors.shape[0] == cum_num_lens[-1] assert all_question_vectors.shape[0] == len(cum_num_lens) assert all_question_vectors.shape[0] == len(qid2idx) assert", "para_scores = [(corpus.paragraphs[pid].tfidf_score, corpus.paragraphs[pid].ans_occurance) for pid in corpus.questions[qid].pids] sorted_para_scores = sorted(para_scores, key=lambda x:", "all_question_vectors = [] all_para_vectors = [] qid2idx = {} cum_num_lens = [] all_correct_ans", "= scores > 0.5 a = scores == labels accuracy += a.sum() logger.info('Eval", "sorted_para_scores = sorted(para_scores, key=lambda x: x[0]) # import pdb # pdb.set_trace() if sorted_para_scores[0][1]", "> 0: logger.info('Epoch = {} | iter={}/{} | para loss = {:2.4f}'.format( stats['epoch'],", "i, qid in enumerate([corpus.paragraphs[pid].qid for pid in ex[-1]]): if qid not in question_vectors:", ")) return top1 def get_topk_tfidf(corpus): top1 = 0 top3 = 0 top5 =", "for ans in sorted_para_scores[:3]]) > 0: top3 += 1 if sum([ans[1] for ans", "def make_data_loader(args, corpus, train_time=False): dataset = data.MultiCorpusDataset( args, corpus, args.word_dict, args.feature_dict, single_answer=False, para_mode=args.para_mode,", "save(args, model, optimizer, filename, epoch=None): params = { 'state_dict': { 'para_clf': model.state_dict(), 'optimizer':", "args.vocab_size = len(word_dict) args.embedding_dim_orig = args.embedding_dim args.word_dict = word_dict args.feature_dict = feature_dict ret", "= sorted(para_scores, key=lambda x: x[0]) # import pdb # pdb.set_trace() if sorted_para_scores[0][1] >", "args.save_para_clf_output: docs = docs.cpu().data.numpy() ques = ques.cpu().data.numpy() if ex_counter == 0: documents =", "if save_scores: for i, pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score = 
scores[i] scores = scores", "data_loader, model, eval_on_train_set=False): args.train_time = False top_1 = 0 top_3 = 0 top_5", "import Variable import torch.optim as optim import torch.nn.functional as F from torch.utils.data.sampler import", "all_para_vectors.shape[0] == cum_num_lens[-1] assert all_question_vectors.shape[0] == len(cum_num_lens) assert all_question_vectors.shape[0] == len(qid2idx) assert all_question_vectors.shape[0]", "# PRINT CONFIG logger.info('-' * 100) logger.info('CONFIG:\\n%s' % json.dumps(vars(args), indent=4, sort_keys=True)) # small", "logger.info('Best accuracy {}'.format(stats['best_acc'])) logger.info('Saving model at {}'.format(args.model_file)) logger.info(\"Logs saved at {}\".format(args.log_file)) save(args, ret_model.model,", "0 top_3 = 0 top_5 = 0 total_num_questions = 0 map_counter = 0", "x: x[0]) # import pdb # pdb.set_trace() if sorted_para_scores[0][1] > 0: top1 +=", "labels all_para_vectors = np.stack(all_para_vectors) all_question_vectors = np.stack(all_question_vectors) assert all_para_vectors.shape[0] == cum_num_lens[-1] assert all_question_vectors.shape[0]", "as F from torch.utils.data.sampler import RandomSampler import config from model import utils, data,", "sorted_para_scores = sorted(para_scores, key=lambda x: x[0], reverse=True) if sorted_para_scores[0][1] > 0: top1 +=", "classifier when evaluated on the annotated dev set.') logger.info('top-1: {:2.4f}, top-3: {:2.4f}, top-5:", "word_dict args.feature_dict = feature_dict ret = LSTMRetriever(args, word_dict, feature_dict) # load saved param", "ques else: documents = np.concatenate([documents, docs]) questions = np.concatenate([questions, ques]) ### create map", "args.train_time = False top_1 = 0 top_3 = 0 top_5 = 0 total_num_questions", "args.src, \"data\", args.domain, args.test_file_name), \"rb\") all_test_exs = pickle.load(fin) fin.close() logger.info(\"Loading done!\") logger.info(\"Num train", "epoch train_binary_classification(args, ret_model, optimizer, 
train_loader, verified_dev_loader=None) logger.info('checkpointing model at {}'.format(args.model_file)) ## check pointing##", "OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"test/\") logger.info(\"Printing vectors at {}\".format(OUT_DIR)) if not os.path.exists(OUT_DIR):", "corpus, dev_loader, verified_dev_loader=None, save_scores = True): total_exs = 0 args.train_time = False ret_model.model.eval()", "\".pkl\" logger.info(\"Loading pickle files\") fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.train_file_name), \"rb\") all_train_exs", "logger.info('Saving model at {}'.format(args.model_file)) logger.info(\"Logs saved at {}\".format(args.log_file)) save(args, ret_model.model, optimizer, args.model_file, epoch=stats['epoch'])", "pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score = scores[i] scores = scores > 0.5 a =", "os.path.join(args.save_dir, args.src, args.domain, \"dev/\") else: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"test/\") logger.info(\"Printing vectors", "0 for q_counter, ranked_para_ids in enumerate(topk_paras): total_num_questions += 1 for i, no_paras in", "= args.test_file_name + \".pkl\" logger.info(\"Loading pickle files\") fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain,", "train_time=train_time ) sampler = SequentialSampler(dataset) if not train_time else RandomSampler(dataset) loader = torch.utils.data.DataLoader(", "enumerate(tqdm(dev_loader)): if ex is None: raise BrokenPipeError inputs = [e if e is", "1: logger.info(\"Num test examples {}\".format(len(all_test_exs.paragraphs))) if args.pretrained is None: ret_model, optimizer, word_dict, feature_dict", "import pdb pdb.set_trace() num_paras = ex[1] qids = ex[-1] if args.save_para_clf_output: docs =", "else: raise RuntimeError('Unsupported optimizer: %s' % args.optimizer) optimizer.load_state_dict(model['state_dict']['optimizer']) logger.info('Model loaded...') return ret, optimizer,", 
"verified_dev_loader=None, save_scores = True, train=False, test=False): total_exs = 0 args.train_time = False ret_model.model.eval()", "corpus.paragraphs[pid].ans_occurance) for pid in corpus.questions[qid].pids] sorted_para_scores = sorted(para_scores, key=lambda x: x[0]) # import", "args.test == 1: args.test_file_name = args.test_file_name + \".pkl\" logger.info(\"Loading pickle files\") fin =", "model = torch.load(args.pretrained) word_dict = model['word_dict'] feature_dict = model['feature_dict'] args.vocab_size = len(word_dict) args.embedding_dim_orig", "= open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.test_file_name), \"rb\") all_test_exs = pickle.load(fin) fin.close() logger.info(\"Loading done!\")", "model from saved checkpoint {}'.format(args.pretrained)) model = torch.load(args.pretrained) word_dict = model['word_dict'] feature_dict =", "\"train/\") else: if args.is_test == 0: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"dev/\") else:", "if ex_counter == 0: documents = docs questions = ques else: documents =", "\"rb\") all_dev_exs = pickle.load(fin) fin.close() if args.test == 1: fin = open(os.path.join(args.data_dir, args.src,", "train paragraph vectors\") save_vectors(args, ret_model, all_train_exs, train_loader, verified_dev_loader=None, train=True) if args.test: args.is_test =", "= {}, top5 = {} '.format(top1 / len(corpus.questions), top3 / len(corpus.questions), top5 /", "sum_num_paras = 0 all_correct_answers = {} for ex_counter, ex in tqdm(enumerate(data_loader)): ret_input =", "= open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.dev_file_name), \"rb\") all_dev_exs = pickle.load(fin) fin.close() if args.test", "json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w')) json.dump(all_correct_answers, open(OUT_DIR + 'correct_paras.json', 'w')) all_cumlen = np.array(cum_num_lens)", "= pickle.load(fin) fin.close() logger.info(\"Loading done!\") logger.info(\"Num train examples 
{}\".format(len(all_train_exs.paragraphs))) logger.info(\"Num dev examples {}\".format(len(all_dev_exs.paragraphs)))", "feature_dict ret = LSTMRetriever(args, word_dict, feature_dict) # load saved param values ret.model.load_state_dict(model['state_dict']['para_clf']) optimizer", "import torch.optim as optim import torch.nn.functional as F from torch.utils.data.sampler import RandomSampler import", "BaseException: logger.warn('[ WARN: Saving failed... continuing anyway. ]') # ------------------------------------------------------------------------------ # Main. #", "= np.stack(all_question_vectors) assert all_para_vectors.shape[0] == cum_num_lens[-1] assert all_question_vectors.shape[0] == len(cum_num_lens) assert all_question_vectors.shape[0] ==", "[] st = sum(num_paras[:i]) for j in range(num_paras[i]): if labels[st+j] == 1: all_correct_answers[map_counter].append(j)", "dev_loader, verified_dev_loader=None) logger.info(\"Saving train paragraph vectors\") save_vectors(args, ret_model, all_train_exs, train_loader, verified_dev_loader=None, train=True) if", "Variable(e.cuda(async=True)) for e in ex[:]] ret_input = [*inputs[:4]] total_exs += ex[0].size(0) scores, doc,", "optimizer = None if parameters is not None and len(parameters) > 0: if", "\"all_cumlen\", all_cumlen) return (top_1 * 1.0 / total_num_questions), (top_3 * 1.0 / total_num_questions),", "{'timer': global_timer, 'epoch': 0, 'best_valid': 0, 'best_verified_valid': 0, 'best_acc': 0, 'best_verified_acc': 0} def", "= {} '.format(top1, top3 ,top5 )) return top1 def get_topk_tfidf(corpus): top1 = 0", "logger.info('Accuracy of para classifier when evaluated on the annotated dev set.') logger.info('top-1: {:2.4f},", "ques[i] for i, pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score = scores[i] get_topk(corpus) print_vectors(args, para_vectors, question_vectors,", "+ \"all_cumlen\", all_cumlen) return (top_1 * 1.0 / total_num_questions), (top_3 * 1.0 /", "from multi_corpus import MultiCorpus from 
torch.utils.data.sampler import SequentialSampler, RandomSampler import math logger =", "vars(args) if epoch: params['epoch'] = epoch try: torch.save(params, filename) # bad hack for", "Parse cmdline args and setup environment args = config.get_args() # Set cuda args.cuda", "<= 0: top_1 += 1 break counter += num_paras[q_counter] logger.info('Accuracy of para classifier", "os.path.exists(OUT_DIR): os.makedirs(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.makedirs(OUT_DIR) json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w')) json.dump(all_correct_ans, open(OUT_DIR", "if args.log_file: if args.checkpoint: logfile = logging.FileHandler(args.log_file, 'a') else: logfile = logging.FileHandler(args.log_file, 'w')", "in ex[-1]]): if qid not in question_vectors: question_vectors[qid] = ques[i] for i, pid", "= init_from_checkpoint(args) # make data loader logger.info(\"Making data loaders...\") if word_dict == None:", "is not None: args.vocab_size = len(word_dict) args.embedding_dim_orig = args.embedding_dim args.word_dict = word_dict args.feature_dict", "== 'sgd': optimizer = optim.SGD(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) elif args.optimizer == 'adamax': optimizer", "args.optimizer == 'adamax': optimizer = optim.Adamax(parameters, weight_decay=args.weight_decay) elif args.optimizer == 'nag': optimizer =", "for epoch in range(args.num_epochs): stats['epoch'] = epoch train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None) logger.info('checkpointing", "len(all_correct_ans) ## saving code if train: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"train/\") else:", "run_predictions(args, data_loader, model, eval_on_train_set=False): args.train_time = False top_1 = 0 top_3 = 0", "questions = np.concatenate([questions, ques]) ### create map and cum_num_lens for i, qid in", "fin.close() if args.test == 1: fin = open(os.path.join(args.data_dir, args.src, \"data\", 
args.domain, args.test_file_name), \"rb\")", "'state_dict': { 'para_clf': model.state_dict(), 'optimizer': optimizer.state_dict() }, 'word_dict': args.word_dict, 'feature_dict': args.feature_dict } args.word_dict", "annotated dev set.') logger.info('top-1: {:2.4f}, top-3: {:2.4f}, top-5: {:2.4f}'.format( (top_1 * 1.0 /", "tqdm(enumerate(data_loader)): ret_input = [*ex] y_num_occurrences = ex[3] labels = (y_num_occurrences > 0) try:", "weight_decay=args.weight_decay) elif args.optimizer == 'adamax': optimizer = optim.Adamax(parameters, weight_decay=args.weight_decay) elif args.optimizer == 'nag':", "saving code if args.save_para_clf_output: if eval_on_train_set: OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/train/\" else: OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/dev/\"", "scores, doc, ques = ret_model.score_paras(*ret_input) scores = scores.cpu().data.numpy() scores = scores.reshape((-1)) if save_scores:", "sorted_para_scores[:5]]) > 0: top5 += 1 top1 = top1/len(corpus.questions) top3 = top3/len(corpus.questions) top5", "%s' % args.optimizer) optimizer.load_state_dict(model['state_dict']['optimizer']) logger.info('Model loaded...') return ret, optimizer, word_dict, feature_dict def init_from_scratch(args,", "for e in ex[:]] ret_input = [*inputs[:4]] total_exs += ex[0].size(0) scores, _, _", "}, 'word_dict': args.word_dict, 'feature_dict': args.feature_dict } args.word_dict = None args.feature_dict = None params['config']", "dev_loader, verified_dev_loader=None) if stats['best_acc'] < top1: stats['best_acc'] = top1 logger.info('Best accuracy {}'.format(stats['best_acc'])) logger.info('Saving", "0: top_1 += 1 break counter += num_paras[q_counter] logger.info('Accuracy of para classifier when", "init_from_checkpoint(args): logger.info('Loading model from saved checkpoint {}'.format(args.pretrained)) model = torch.load(args.pretrained) word_dict = model['word_dict']", "para_vectors[pid] = doc[i] for i, qid in enumerate([corpus.paragraphs[pid].qid for pid in 
ex[-1]]): if", "= 0 top_3 = 0 top_5 = 0 total_num_questions = 0 map_counter =", "e in ex[:]] ret_input = [*inputs[:4]] total_exs += ex[0].size(0) scores, _, _ =", "= config.get_args() # Set cuda args.cuda = not args.no_cuda and torch.cuda.is_available() if args.cuda:", "ret.get_trainable_params() optimizer = None if parameters is not None and len(parameters) > 0:", "map_counter = 0 cum_num_lens = [] qid2idx = {} sum_num_paras = 0 all_correct_answers", "_ = ret_model.score_paras(*ret_input) scores = F.sigmoid(scores) y_num_occurrences = Variable(ex[-2]) labels = (y_num_occurrences >", "= os.path.join(args.save_dir, args.src, args.domain, \"test/\") logger.info(\"Printing vectors at {}\".format(OUT_DIR)) if not os.path.exists(OUT_DIR): os.makedirs(OUT_DIR)", "CONFIG logger.info('-' * 100) logger.info('CONFIG:\\n%s' % json.dumps(vars(args), indent=4, sort_keys=True)) # small can't test", "+= 1 if i <= 2: top_3 += 1 if i <= 0:", "all_train_exs = pickle.load(fin) fin.close() fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.dev_file_name), \"rb\") all_dev_exs", "= Variable(ex[-2]) labels = (y_num_occurrences > 0).float() labels = labels.cuda() # BCE logits", "np.save(OUT_DIR + \"question\", questions) np.save(OUT_DIR + \"all_cumlen\", all_cumlen) return (top_1 * 1.0 /", "momentum=args.momentum, weight_decay=args.weight_decay) else: raise RuntimeError('Unsupported optimizer: %s' % args.optimizer) else: pass return ret,", "pin_memory=True ) return loader def init_from_checkpoint(args): logger.info('Loading model from saved checkpoint {}'.format(args.pretrained)) model", "print_vectors(args, para_vectors, question_vectors, corpus, train=False, test=False): all_question_vectors = [] all_para_vectors = [] qid2idx", "args.word_dict = word_dict args.feature_dict = feature_dict ret = LSTMRetriever(args, word_dict, feature_dict) # --------------------------------------------------------------------------", "single_answer=False, 
para_mode=args.para_mode, train_time=train_time ) sampler = SequentialSampler(dataset) if not train_time else RandomSampler(dataset) loader", "all_question_vectors = np.stack(all_question_vectors) assert all_para_vectors.shape[0] == cum_num_lens[-1] assert all_question_vectors.shape[0] == len(cum_num_lens) assert all_question_vectors.shape[0]", "== len(cum_num_lens) assert all_question_vectors.shape[0] == len(qid2idx) assert all_question_vectors.shape[0] == len(all_correct_ans) ## saving code", "labels[st+j] == 1: all_correct_answers[map_counter].append(j) ### Test case: assert len(all_correct_answers[map_counter]) == sum(labels.data.numpy()[st: st +", "= params['word_dict'] args.feature_dict = params['feature_dict'] except BaseException: logger.warn('[ WARN: Saving failed... continuing anyway.", "_, _ = ret_model.score_paras(*ret_input) y_num_occurrences = Variable(ex[-2]) labels = (y_num_occurrences > 0).float() labels", "/ total_num_questions))) ## saving code if args.save_para_clf_output: if eval_on_train_set: OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/train/\" else:", "labels = (y_num_occurrences > 0) try: topk_paras, docs, ques = model.return_topk(5,*ret_input) except RuntimeError:", "num_paras[i]]) map_counter += 1 counter = 0 for q_counter, ranked_para_ids in enumerate(topk_paras): total_num_questions", "args.log_file: if args.checkpoint: logfile = logging.FileHandler(args.log_file, 'a') else: logfile = logging.FileHandler(args.log_file, 'w') logfile.setFormatter(fmt)", "\"/iesl/canvas/sdhuliawala/vectors_web/train/\" else: OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/dev/\" if not os.path.exists(OUT_DIR): os.mkdir(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.mkdir(OUT_DIR)", "0 all_correct_answers = {} for ex_counter, ex in tqdm(enumerate(data_loader)): ret_input = [*ex] y_num_occurrences", "train_loader, verified_dev_loader=None) logger.info('checkpointing model at {}'.format(args.model_file)) ## check pointing## save(args, 
ret_model.model, optimizer, args.model_file+\".ckpt\",", "F from torch.utils.data.sampler import RandomSampler import config from model import utils, data, vector", "corpus.paragraphs[pid].model_score = scores[i] get_topk(corpus) print_vectors(args, para_vectors, question_vectors, corpus, train, test) def get_topk(corpus): top1", "100) logger.info('CONFIG:\\n%s' % json.dumps(vars(args), indent=4, sort_keys=True)) # small can't test if args.small ==", "if eval_on_train_set: OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/train/\" else: OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/dev/\" if not os.path.exists(OUT_DIR): os.mkdir(OUT_DIR)", "= len(word_dict) args.embedding_dim_orig = args.embedding_dim args.word_dict = word_dict args.feature_dict = feature_dict ret =", "# MODEL logger.info('-' * 100) # Parse cmdline args and setup environment args", "/ len(corpus.questions), top3 / len(corpus.questions), top5 / len(corpus.questions))) def run_predictions(args, data_loader, model, eval_on_train_set=False):", "False ret_model.model.eval() para_vectors = {} question_vectors = {} for idx, ex in enumerate(tqdm(data_loader)):", "map_counter sum_num_paras += num_paras[i] cum_num_lens.append(sum_num_paras) all_correct_answers[map_counter] = [] st = sum(num_paras[:i]) for j", "+= 1 counter = 0 for q_counter, ranked_para_ids in enumerate(topk_paras): total_num_questions += 1", "{}\".format(len(all_dev_exs.paragraphs))) if args.test == 1: logger.info(\"Num test examples {}\".format(len(all_test_exs.paragraphs))) if args.pretrained is None:", "{}, top3 = {}, top5 = {} '.format(top1 / len(corpus.questions), top3 / len(corpus.questions),", "cuda args.cuda = not args.no_cuda and torch.cuda.is_available() if args.cuda: torch.cuda.set_device(args.gpu) # Set random", "feature_dict = model['feature_dict'] args.vocab_size = len(word_dict) args.embedding_dim_orig = args.embedding_dim args.word_dict = word_dict args.feature_dict", "enumerate([corpus.paragraphs[pid].qid for pid in 
ex[-1]]): if qid not in question_vectors: question_vectors[qid] = ques[i]", "global_timer = utils.Timer() stats = {'timer': global_timer, 'epoch': 0, 'best_valid': 0, 'best_verified_valid': 0,", "logger.info('checkpointing model at {}'.format(args.model_file)) ## check pointing## save(args, ret_model.model, optimizer, args.model_file+\".ckpt\", epoch=stats['epoch']) logger.info(\"Evaluating", "save_scores: for i, pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score = scores[i] scores = scores >", "if i <= 4: top_5 += 1 if i <= 2: top_3 +=", "if args.eval_only: logger.info(\"Saving dev paragraph vectors\") save_vectors(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None) logger.info(\"Saving train", "= epoch train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None) logger.info('checkpointing model at {}'.format(args.model_file)) ## check", "args.dev_file_name + \".pkl\" if args.test == 1: args.test_file_name = args.test_file_name + \".pkl\" logger.info(\"Loading", "train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None) logger.info('checkpointing model at {}'.format(args.model_file)) ## check pointing## save(args,", "ret_model, corpus, data_loader, verified_dev_loader=None, save_scores = True, train=False, test=False): total_exs = 0 args.train_time", "top3 = 0 top5 = 0 for qid in corpus.questions: para_scores = [(corpus.paragraphs[pid].model_score,corpus.paragraphs[pid].ans_occurance", "def get_topk(corpus): top1 = 0 top3 = 0 top5 = 0 for qid", "+ \"question\", all_question_vectors) np.save(OUT_DIR + \"all_cumlen\", cum_num_lens) def save_vectors(args, ret_model, corpus, data_loader, verified_dev_loader=None,", "all_correct_answers[map_counter] = [] st = sum(num_paras[:i]) for j in range(num_paras[i]): if labels[st+j] ==", "args.small == 1: args.train_file_name = args.train_file_name + \"_small\" args.dev_file_name = args.dev_file_name + \"_small\"", "type(e) != 
type(ex[0]) else Variable(e.cuda(async=True)) for e in ex[:]] ret_input = [*inputs[:4]] scores,", "len(train_loader), para_loss.avg)) para_loss.reset() def eval_binary_classification(args, ret_model, corpus, dev_loader, verified_dev_loader=None, save_scores = True): total_exs", "ret_model.model, optimizer, args.model_file+\".ckpt\", epoch=stats['epoch']) logger.info(\"Evaluating on the full dev set....\") top1 = eval_binary_classification(args,", "RuntimeError('Unsupported optimizer: %s' % args.optimizer) optimizer.load_state_dict(model['state_dict']['optimizer']) logger.info('Model loaded...') return ret, optimizer, word_dict, feature_dict", "and torch.cuda.is_available() if args.cuda: torch.cuda.set_device(args.gpu) # Set random state np.random.seed(args.random_seed) torch.manual_seed(args.random_seed) if args.cuda:", "= optim.SGD(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) elif args.optimizer == 'adamax': optimizer = optim.Adamax(parameters, weight_decay=args.weight_decay)", "no_paras in enumerate(ranked_para_ids): if labels[counter + no_paras ] ==1: if i <= 4:", "= np.array(cum_num_lens) np.save(OUT_DIR + \"document\", documents) np.save(OUT_DIR + \"question\", questions) np.save(OUT_DIR + \"all_cumlen\",", "ret_input = [*inputs[:4]] total_exs += ex[0].size(0) scores, doc, ques = ret_model.score_paras(*ret_input) scores =", "optimizer: %s' % args.optimizer) else: pass return ret, optimizer, word_dict, feature_dict def train_binary_classification(args,", "all_train_exs) else: ret_model, optimizer, word_dict, feature_dict = init_from_checkpoint(args) # make data loader logger.info(\"Making", "optimizer = NAG(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) else: raise RuntimeError('Unsupported optimizer: %s' % args.optimizer)", "1 if i <= 2: top_3 += 1 if i <= 0: top_1", "json import os import pickle import sys import logging import shutil from tqdm", "make_data_loader(args, 
all_train_exs, train_time=True) dev_loader = make_data_loader(args, all_dev_exs) if args.test: test_loader = make_data_loader(args, all_test_exs)", "Variable(ex[-2]) labels = (y_num_occurrences > 0).float() labels = labels.cuda() # BCE logits loss", "np.random.seed(args.random_seed) torch.manual_seed(args.random_seed) if args.cuda: torch.cuda.manual_seed(args.random_seed) # Set logging logger.setLevel(logging.INFO) fmt = logging.Formatter('%(asctime)s: %(message)s',", "fin.close() logger.info(\"Loading done!\") logger.info(\"Num train examples {}\".format(len(all_train_exs.paragraphs))) logger.info(\"Num dev examples {}\".format(len(all_dev_exs.paragraphs))) if args.test", "ques = ret_model.score_paras(*ret_input) scores = scores.cpu().data.numpy() scores = scores.reshape((-1)) if save_scores: for i,", "top5/len(corpus.questions) logger.info('top1 = {}, top3 = {}, top5 = {} '.format(top1, top3 ,top5", "+= len(corpus.questions[qid].pids) cum_num_lens.append(cum_num_len) for para_i, pid in enumerate(corpus.questions[qid].pids): if corpus.paragraphs[pid].ans_occurance > 0: labels.append(para_i)", "np.concatenate([questions, ques]) ### create map and cum_num_lens for i, qid in enumerate(qids): qid2idx[qid]", "ret_model.score_paras(*ret_input) scores = scores.cpu().data.numpy() scores = scores.reshape((-1)) if save_scores: for i, pid in", "\"_small\" args.train_file_name = args.train_file_name + \".pkl\" args.dev_file_name = args.dev_file_name + \".pkl\" if args.test", "args.feature_dict = feature_dict ret = LSTMRetriever(args, word_dict, feature_dict) # -------------------------------------------------------------------------- # TRAIN/VALID LOOP", "on the annotated dev set.') logger.info('top-1: {:2.4f}, top-3: {:2.4f}, top-5: {:2.4f}'.format( (top_1 *", "np.stack(all_para_vectors) all_question_vectors = np.stack(all_question_vectors) assert all_para_vectors.shape[0] == cum_num_lens[-1] assert all_question_vectors.shape[0] == len(cum_num_lens) assert", "multi_corpus 
import MultiCorpus from torch.utils.data.sampler import SequentialSampler, RandomSampler import math logger = logging.getLogger()", "stats['epoch'] = epoch train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None) logger.info('checkpointing model at {}'.format(args.model_file)) ##", "verified_dev_loader=None, save_scores = True): total_exs = 0 args.train_time = False ret_model.model.eval() accuracy =", "pass return ret, optimizer, word_dict, feature_dict def train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None): args.train_time", "config from model import utils, data, vector from model.retriever import LSTMRetriever from multi_corpus", "saving code if train: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"train/\") else: if args.is_test", "momentum=args.momentum, weight_decay=args.weight_decay) elif args.optimizer == 'adamax': optimizer = optim.Adamax(parameters, weight_decay=args.weight_decay) elif args.optimizer ==", "corpus.paragraphs[pid].ans_occurance > 0: labels.append(para_i) all_para_vectors.append(para_vectors[pid]) all_correct_ans[qid] = labels all_para_vectors = np.stack(all_para_vectors) all_question_vectors =", "args.train_file_name + \"_small\" args.dev_file_name = args.dev_file_name + \"_small\" if args.test == 1: args.test_file_name", "logging.Formatter('%(asctime)s: %(message)s', '%m/%d/%Y %I:%M:%S %p') console = logging.StreamHandler() console.setFormatter(fmt) logger.addHandler(console) if args.log_file: if", "and len(parameters) > 0: if args.optimizer == 'sgd': optimizer = optim.SGD(parameters, args.learning_rate, momentum=args.momentum,", "save_scores: for i, pid in enumerate(ex[-1]): para_vectors[pid] = doc[i] for i, qid in", "assert len(cum_num_lens) == len(qid2idx) assert len(cum_num_lens) == len(all_correct_answers) json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w'))", "os.makedirs(OUT_DIR) json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w')) 
json.dump(all_correct_ans, open(OUT_DIR + 'correct_paras.json', 'w')) all_cumlen =", "{} for idx, ex in enumerate(tqdm(data_loader)): if ex is None: raise BrokenPipeError inputs", "------------------------------------------------------------------------------ # Main. # ------------------------------------------------------------------------------ def main(args): # PRINT CONFIG logger.info('-' * 100)", "test_loader, verified_dev_loader=None) else: get_topk_tfidf(all_dev_exs) for epoch in range(args.num_epochs): stats['epoch'] = epoch train_binary_classification(args, ret_model,", "= top1 logger.info('Best accuracy {}'.format(stats['best_acc'])) logger.info('Saving model at {}'.format(args.model_file)) logger.info(\"Logs saved at {}\".format(args.log_file))", "== 0: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"dev/\") else: OUT_DIR = os.path.join(args.save_dir, args.src,", "if args.test: test_loader = make_data_loader(args, all_test_exs) if args.eval_only: logger.info(\"Saving dev paragraph vectors\") save_vectors(args,", "== sum(labels.data.numpy()[st: st + num_paras[i]]) map_counter += 1 counter = 0 for q_counter,", "= [] all_correct_ans = {} cum_num_len = 0 for question_i, qid in enumerate(corpus.questions):", "dev set....\") top1 = eval_binary_classification(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None) if stats['best_acc'] < top1:", "1 if i <= 0: top_1 += 1 break counter += num_paras[q_counter] logger.info('Accuracy", "shutil.rmtree(OUT_DIR, ignore_errors=True) os.mkdir(OUT_DIR) #Test cases assert cum_num_lens[-1] == documents.shape[0] assert questions.shape[0] == documents.shape[0]", "for pid in ex[-1]]): if qid not in question_vectors: question_vectors[qid] = ques[i] for", "= 0 map_counter = 0 cum_num_lens = [] qid2idx = {} sum_num_paras =", "= {}, top3 = {}, top5 = {} '.format(top1 / len(corpus.questions), top3 /", "args.is_test = 1 logger.info(\"Saving test paragraph vectors\") save_vectors(args, ret_model, 
all_test_exs, test_loader, verified_dev_loader=None) else:", "qid2idx = {} cum_num_lens = [] all_correct_ans = {} cum_num_len = 0 for", "word_dict args.feature_dict = feature_dict ret = LSTMRetriever(args, word_dict, feature_dict) # -------------------------------------------------------------------------- # TRAIN/VALID", "## saving code if args.save_para_clf_output: if eval_on_train_set: OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/train/\" else: OUT_DIR =", "args.test = 0 if args.small == 1: args.train_file_name = args.train_file_name + \"_small\" args.dev_file_name", "Main. # ------------------------------------------------------------------------------ def main(args): # PRINT CONFIG logger.info('-' * 100) logger.info('CONFIG:\\n%s' %", "args.feature_dict = feature_dict ret = LSTMRetriever(args, word_dict, feature_dict) # load saved param values", "docs]) questions = np.concatenate([questions, ques]) ### create map and cum_num_lens for i, qid", "args.pretrained is None: ret_model, optimizer, word_dict, feature_dict = init_from_scratch(args, all_train_exs) else: ret_model, optimizer,", "= 0 for q_counter, ranked_para_ids in enumerate(topk_paras): total_num_questions += 1 for i, no_paras", "if args.save_para_clf_output: docs = docs.cpu().data.numpy() ques = ques.cpu().data.numpy() if ex_counter == 0: documents", "len(corpus.questions), top3 / len(corpus.questions), top5 / len(corpus.questions))) def run_predictions(args, data_loader, model, eval_on_train_set=False): args.train_time", "i <= 4: top_5 += 1 if i <= 2: top_3 += 1", "in ex[:]] ret_input = [*inputs[:4]] scores, _, _ = ret_model.score_paras(*ret_input) y_num_occurrences = Variable(ex[-2])", "optimizer, args.model_file+\".ckpt\", epoch=stats['epoch']) logger.info(\"Evaluating on the full dev set....\") top1 = eval_binary_classification(args, ret_model,", "or get vocab word_dict = utils.build_word_dict(args, train_exs) if word_dict is not None: args.vocab_size", "assert all_question_vectors.shape[0] == 
len(qid2idx) assert all_question_vectors.shape[0] == len(all_correct_ans) ## saving code if train:", "logger.info('Loading model from saved checkpoint {}'.format(args.pretrained)) model = torch.load(args.pretrained) word_dict = model['word_dict'] feature_dict", "= [*inputs[:4]] total_exs += ex[0].size(0) scores, doc, ques = ret_model.score_paras(*ret_input) scores = scores.cpu().data.numpy()", "logging.getLogger() global_timer = utils.Timer() stats = {'timer': global_timer, 'epoch': 0, 'best_valid': 0, 'best_verified_valid':", "qid2idx[qid] = question_i cum_num_len += len(corpus.questions[qid].pids) cum_num_lens.append(cum_num_len) for para_i, pid in enumerate(corpus.questions[qid].pids): if", "for i, pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score = scores[i] scores = scores > 0.5", "open(OUT_DIR + 'map.json', 'w')) json.dump(all_correct_answers, open(OUT_DIR + 'correct_paras.json', 'w')) all_cumlen = np.array(cum_num_lens) np.save(OUT_DIR", "verified_dev_loader=None) else: get_topk_tfidf(all_dev_exs) for epoch in range(args.num_epochs): stats['epoch'] = epoch train_binary_classification(args, ret_model, optimizer,", "ex_counter, ex in tqdm(enumerate(data_loader)): ret_input = [*ex] y_num_occurrences = ex[3] labels = (y_num_occurrences", "### Test case: assert len(all_correct_answers[map_counter]) == sum(labels.data.numpy()[st: st + num_paras[i]]) map_counter += 1", "else: get_topk_tfidf(all_dev_exs) for epoch in range(args.num_epochs): stats['epoch'] = epoch train_binary_classification(args, ret_model, optimizer, train_loader,", "enumerate(tqdm(data_loader)): if ex is None: raise BrokenPipeError inputs = [e if e is", "> 0) try: topk_paras, docs, ques = model.return_topk(5,*ret_input) except RuntimeError: import pdb pdb.set_trace()", "for pid in corpus.questions[qid].pids] sorted_para_scores = sorted(para_scores, key=lambda x: x[0], reverse=True) if sorted_para_scores[0][1]", "* 1.0 / total_num_questions) def save(args, model, optimizer, filename, 
epoch=None): params = {", "train examples {}\".format(len(all_train_exs.paragraphs))) logger.info(\"Num dev examples {}\".format(len(all_dev_exs.paragraphs))) if args.test == 1: logger.info(\"Num test", "if args.small == 1: args.test = 0 if args.small == 1: args.train_file_name =", "if args.cuda: torch.cuda.set_device(args.gpu) # Set random state np.random.seed(args.random_seed) torch.manual_seed(args.random_seed) if args.cuda: torch.cuda.manual_seed(args.random_seed) #", "'w')) json.dump(all_correct_ans, open(OUT_DIR + 'correct_paras.json', 'w')) all_cumlen = np.array(cum_num_lens) np.save(OUT_DIR + \"document\", all_para_vectors)", "import torch import numpy as np import json import os import pickle import", "{} '.format(accuracy/total_exs)) top1 = get_topk(corpus) return top1 def print_vectors(args, para_vectors, question_vectors, corpus, train=False,", "all_dev_exs, dev_loader, verified_dev_loader=None) if stats['best_acc'] < top1: stats['best_acc'] = top1 logger.info('Best accuracy {}'.format(stats['best_acc']))", "ex in enumerate(tqdm(dev_loader)): if ex is None: raise BrokenPipeError inputs = [e if", "{:2.4f}'.format( (top_1 * 1.0 / total_num_questions), (top_3 * 1.0 / total_num_questions), (top_5 *", "documents.shape[0] assert len(cum_num_lens) == len(qid2idx) assert len(cum_num_lens) == len(all_correct_answers) json.dump(qid2idx, open(OUT_DIR + 'map.json',", "logfile = logging.FileHandler(args.log_file, 'a') else: logfile = logging.FileHandler(args.log_file, 'w') logfile.setFormatter(fmt) logger.addHandler(logfile) logger.info('[ COMMAND:", "params = { 'state_dict': { 'para_clf': model.state_dict(), 'optimizer': optimizer.state_dict() }, 'word_dict': args.word_dict, 'feature_dict':", "try: topk_paras, docs, ques = model.return_topk(5,*ret_input) except RuntimeError: import pdb pdb.set_trace() num_paras =", "args.test_file_name + \"_small\" args.train_file_name = args.train_file_name + \".pkl\" args.dev_file_name = args.dev_file_name + \".pkl\"", "= 
logging.getLogger() global_timer = utils.Timer() stats = {'timer': global_timer, 'epoch': 0, 'best_valid': 0,", "at {}\".format(args.log_file)) save(args, ret_model.model, optimizer, args.model_file, epoch=stats['epoch']) if __name__ == '__main__': # MODEL", "optimizer, train_loader, verified_dev_loader=None) logger.info('checkpointing model at {}'.format(args.model_file)) ## check pointing## save(args, ret_model.model, optimizer,", "filename, epoch=None): params = { 'state_dict': { 'para_clf': model.state_dict(), 'optimizer': optimizer.state_dict() }, 'word_dict':", "args.feature_dict, single_answer=False, para_mode=args.para_mode, train_time=train_time ) sampler = SequentialSampler(dataset) if not train_time else RandomSampler(dataset)", "verified_dev_loader=None, train=True) if args.test: args.is_test = 1 logger.info(\"Saving test paragraph vectors\") save_vectors(args, ret_model,", "save_vectors(args, ret_model, corpus, data_loader, verified_dev_loader=None, save_scores = True, train=False, test=False): total_exs = 0", "ex[0].size(0) scores, doc, ques = ret_model.score_paras(*ret_input) scores = scores.cpu().data.numpy() scores = scores.reshape((-1)) if", "enumerate(qids): qid2idx[qid] = map_counter sum_num_paras += num_paras[i] cum_num_lens.append(sum_num_paras) all_correct_answers[map_counter] = [] st =", "+= 1 if sum([ans[1] for ans in sorted_para_scores[:5]]) > 0: top5 += 1", "all_question_vectors.append(question_vectors[qid]) qid2idx[qid] = question_i cum_num_len += len(corpus.questions[qid].pids) cum_num_lens.append(cum_num_len) for para_i, pid in enumerate(corpus.questions[qid].pids):", "0).float() labels = labels.data.numpy() scores = scores.cpu().data.numpy() scores = scores.reshape((-1)) if save_scores: for", "params['config'] = vars(args) if epoch: params['epoch'] = epoch try: torch.save(params, filename) # bad", "RandomSampler import config from model import utils, data, vector from model.retriever import LSTMRetriever", "logger.addHandler(console) 
if args.log_file: if args.checkpoint: logfile = logging.FileHandler(args.log_file, 'a') else: logfile = logging.FileHandler(args.log_file,", "# BCE logits loss batch_para_loss = F.binary_cross_entropy_with_logits(scores.squeeze(1), labels) optimizer.zero_grad() batch_para_loss.backward() torch.nn.utils.clip_grad_norm(ret_model.get_trainable_params(), 2.0) optimizer.step()", "json.dump(all_correct_ans, open(OUT_DIR + 'correct_paras.json', 'w')) all_cumlen = np.array(cum_num_lens) np.save(OUT_DIR + \"document\", all_para_vectors) np.save(OUT_DIR", "= optim.Adamax(parameters, weight_decay=args.weight_decay) elif args.optimizer == 'nag': optimizer = NAG(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)", "not os.path.exists(OUT_DIR): os.mkdir(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.mkdir(OUT_DIR) #Test cases assert cum_num_lens[-1] == documents.shape[0]", "ret_model, optimizer, word_dict, feature_dict = init_from_scratch(args, all_train_exs) else: ret_model, optimizer, word_dict, feature_dict =", "doc, ques = ret_model.score_paras(*ret_input) scores = scores.cpu().data.numpy() scores = scores.reshape((-1)) if save_scores: for", "create map and cum_num_lens for i, qid in enumerate(qids): qid2idx[qid] = map_counter sum_num_paras", "if sorted_para_scores[0][1] > 0: top1 += 1 if sum([ans[1] for ans in sorted_para_scores[:3]])", "if args.test == 1: logger.info(\"Num test examples {}\".format(len(all_test_exs.paragraphs))) if args.pretrained is None: ret_model,", "logger.warn('[ WARN: Saving failed... continuing anyway. ]') # ------------------------------------------------------------------------------ # Main. 
# ------------------------------------------------------------------------------", "sampler = SequentialSampler(dataset) if not train_time else RandomSampler(dataset) loader = torch.utils.data.DataLoader( dataset, batch_size=args.batch_size,", "= word_dict args.feature_dict = feature_dict ret = LSTMRetriever(args, word_dict, feature_dict) # -------------------------------------------------------------------------- #", "elif args.optimizer == 'nag': optimizer = NAG(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) else: raise RuntimeError('Unsupported", "= args.dev_file_name + \"_small\" if args.test == 1: args.test_file_name = args.test_file_name + \"_small\"", "import shutil from tqdm import tqdm from torch.autograd import Variable import torch.optim as", "para loss = {:2.4f}'.format( stats['epoch'], idx, len(train_loader), para_loss.avg)) para_loss.reset() def eval_binary_classification(args, ret_model, corpus,", "scores = scores > 0.5 a = scores == labels accuracy += a.sum()", "map and cum_num_lens for i, qid in enumerate(qids): qid2idx[qid] = map_counter sum_num_paras +=", "word_dict == None: args.word_dict = utils.build_word_dict(args, (all_train_exs, all_dev_exs)) word_dict = args.word_dict train_loader =", "== 0 and idx > 0: logger.info('Epoch = {} | iter={}/{} | para", "= False ret_model.model.eval() para_vectors = {} question_vectors = {} for idx, ex in", "open(OUT_DIR + 'correct_paras.json', 'w')) all_cumlen = np.array(cum_num_lens) np.save(OUT_DIR + \"document\", all_para_vectors) np.save(OUT_DIR +", "ignore_errors=True) os.makedirs(OUT_DIR) json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w')) json.dump(all_correct_ans, open(OUT_DIR + 'correct_paras.json', 'w')) all_cumlen", "== len(all_correct_answers) json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w')) json.dump(all_correct_answers, open(OUT_DIR + 'correct_paras.json', 'w')) all_cumlen", "sampler=sampler, num_workers=args.data_workers, 
collate_fn=vector.batchify(args, args.para_mode, train_time=train_time), pin_memory=True ) return loader def init_from_checkpoint(args): logger.info('Loading model", "question_vectors = {} for idx, ex in enumerate(tqdm(data_loader)): if ex is None: raise", "= [(corpus.paragraphs[pid].tfidf_score, corpus.paragraphs[pid].ans_occurance) for pid in corpus.questions[qid].pids] sorted_para_scores = sorted(para_scores, key=lambda x: x[0])", "qid in corpus.questions: para_scores = [(corpus.paragraphs[pid].model_score,corpus.paragraphs[pid].ans_occurance ) for pid in corpus.questions[qid].pids] sorted_para_scores =", "examples {}\".format(len(all_test_exs.paragraphs))) if args.pretrained is None: ret_model, optimizer, word_dict, feature_dict = init_from_scratch(args, all_train_exs)", "question_i cum_num_len += len(corpus.questions[qid].pids) cum_num_lens.append(cum_num_len) for para_i, pid in enumerate(corpus.questions[qid].pids): if corpus.paragraphs[pid].ans_occurance >", "dev set.') logger.info('top-1: {:2.4f}, top-3: {:2.4f}, top-5: {:2.4f}'.format( (top_1 * 1.0 / total_num_questions),", "batch_para_loss = F.binary_cross_entropy_with_logits(scores.squeeze(1), labels) optimizer.zero_grad() batch_para_loss.backward() torch.nn.utils.clip_grad_norm(ret_model.get_trainable_params(), 2.0) optimizer.step() para_loss.update(batch_para_loss.data.item()) if math.isnan(para_loss.avg): import", "sum([ans[1] for ans in sorted_para_scores[:3]]) > 0: top3 += 1 if sum([ans[1] for", "args.optimizer == 'nag': optimizer = NAG(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) else: raise RuntimeError('Unsupported optimizer:", "args.para_mode, train_time=train_time), pin_memory=True ) return loader def init_from_checkpoint(args): logger.info('Loading model from saved checkpoint", "batch_size=args.batch_size, sampler=sampler, num_workers=args.data_workers, collate_fn=vector.batchify(args, args.para_mode, train_time=train_time), pin_memory=True ) return 
loader def init_from_checkpoint(args): logger.info('Loading", "1: args.test_file_name = args.test_file_name + \"_small\" args.train_file_name = args.train_file_name + \".pkl\" args.dev_file_name =", "cum_num_lens[-1] == documents.shape[0] assert questions.shape[0] == documents.shape[0] assert len(cum_num_lens) == len(qid2idx) assert len(cum_num_lens)", "[*inputs[:4]] total_exs += ex[0].size(0) scores, doc, ques = ret_model.score_paras(*ret_input) scores = scores.cpu().data.numpy() scores", "= ex[-1] if args.save_para_clf_output: docs = docs.cpu().data.numpy() ques = ques.cpu().data.numpy() if ex_counter ==", "0 if args.small == 1: args.train_file_name = args.train_file_name + \"_small\" args.dev_file_name = args.dev_file_name", "[] all_para_vectors = [] qid2idx = {} cum_num_lens = [] all_correct_ans = {}", "and cum_num_lens for i, qid in enumerate(qids): qid2idx[qid] = map_counter sum_num_paras += num_paras[i]", "qid in enumerate(corpus.questions): labels = [] all_question_vectors.append(question_vectors[qid]) qid2idx[qid] = question_i cum_num_len += len(corpus.questions[qid].pids)", "top3 += 1 if sum([ans[1] for ans in sorted_para_scores[:5]]) > 0: top5 +=", "(y_num_occurrences > 0).float() labels = labels.cuda() # BCE logits loss batch_para_loss = F.binary_cross_entropy_with_logits(scores.squeeze(1),", "== labels accuracy += a.sum() logger.info('Eval accuracy = {} '.format(accuracy/total_exs)) top1 = get_topk(corpus)", "np import json import os import pickle import sys import logging import shutil", "= torch.utils.data.DataLoader( dataset, batch_size=args.batch_size, sampler=sampler, num_workers=args.data_workers, collate_fn=vector.batchify(args, args.para_mode, train_time=train_time), pin_memory=True ) return loader", "top5 = {} '.format(top1, top3 ,top5 )) return top1 def get_topk_tfidf(corpus): top1 =", "scores.cpu().data.numpy() scores = scores.reshape((-1)) if save_scores: for i, pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score =", "args.src, 
args.domain, \"test/\") logger.info(\"Printing vectors at {}\".format(OUT_DIR)) if not os.path.exists(OUT_DIR): os.makedirs(OUT_DIR) else: shutil.rmtree(OUT_DIR,", "np.save(OUT_DIR + \"document\", all_para_vectors) np.save(OUT_DIR + \"question\", all_question_vectors) np.save(OUT_DIR + \"all_cumlen\", cum_num_lens) def", "> 0: if args.optimizer == 'sgd': optimizer = optim.SGD(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) elif", "in enumerate(qids): qid2idx[qid] = map_counter sum_num_paras += num_paras[i] cum_num_lens.append(sum_num_paras) all_correct_answers[map_counter] = [] st", "all_cumlen = np.array(cum_num_lens) np.save(OUT_DIR + \"document\", documents) np.save(OUT_DIR + \"question\", questions) np.save(OUT_DIR +", "in enumerate(corpus.questions): labels = [] all_question_vectors.append(question_vectors[qid]) qid2idx[qid] = question_i cum_num_len += len(corpus.questions[qid].pids) cum_num_lens.append(cum_num_len)", "RandomSampler import math logger = logging.getLogger() global_timer = utils.Timer() stats = {'timer': global_timer,", "st = sum(num_paras[:i]) for j in range(num_paras[i]): if labels[st+j] == 1: all_correct_answers[map_counter].append(j) ###", "or type(e) != type(ex[0]) else Variable(e.cuda(async=True)) for e in ex[:]] ret_input = [*inputs[:4]]", "ret_model, corpus, dev_loader, verified_dev_loader=None, save_scores = True): total_exs = 0 args.train_time = False", "= ret.get_trainable_params() optimizer = None if parameters is not None and len(parameters) >", "==1: if i <= 4: top_5 += 1 if i <= 2: top_3", "optimizer: %s' % args.optimizer) optimizer.load_state_dict(model['state_dict']['optimizer']) logger.info('Model loaded...') return ret, optimizer, word_dict, feature_dict def", "top1 logger.info('Best accuracy {}'.format(stats['best_acc'])) logger.info('Saving model at {}'.format(args.model_file)) logger.info(\"Logs saved at {}\".format(args.log_file)) save(args,", "= 0.0 for idx, ex in 
enumerate(tqdm(dev_loader)): if ex is None: raise BrokenPipeError", "documents = docs questions = ques else: documents = np.concatenate([documents, docs]) questions =", "* 100) # Parse cmdline args and setup environment args = config.get_args() #", "open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.train_file_name), \"rb\") all_train_exs = pickle.load(fin) fin.close() fin = open(os.path.join(args.data_dir,", "top-3: {:2.4f}, top-5: {:2.4f}'.format( (top_1 * 1.0 / total_num_questions), (top_3 * 1.0 /", "para_i, pid in enumerate(corpus.questions[qid].pids): if corpus.paragraphs[pid].ans_occurance > 0: labels.append(para_i) all_para_vectors.append(para_vectors[pid]) all_correct_ans[qid] = labels", "0 for question_i, qid in enumerate(corpus.questions): labels = [] all_question_vectors.append(question_vectors[qid]) qid2idx[qid] = question_i", "ret_model, optimizer, train_loader, verified_dev_loader=None): args.train_time = True para_loss = utils.AverageMeter() ret_model.model.train() for idx,", "tqdm import tqdm from torch.autograd import Variable import torch.optim as optim import torch.nn.functional", "is not None and len(parameters) > 0: if args.optimizer == 'sgd': optimizer =", "pdb.set_trace() if idx % 25 == 0 and idx > 0: logger.info('Epoch =", "np.save(OUT_DIR + \"all_cumlen\", all_cumlen) return (top_1 * 1.0 / total_num_questions), (top_3 * 1.0", "ret.get_trainable_params() if args.optimizer == 'sgd': optimizer = optim.SGD(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) elif args.optimizer", "fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.test_file_name), \"rb\") all_test_exs = pickle.load(fin) fin.close() logger.info(\"Loading", "= ex[1] qids = ex[-1] if args.save_para_clf_output: docs = docs.cpu().data.numpy() ques = ques.cpu().data.numpy()", "'word_dict': args.word_dict, 'feature_dict': args.feature_dict } args.word_dict = None args.feature_dict = None params['config'] 
=", "[] qid2idx = {} cum_num_lens = [] all_correct_ans = {} cum_num_len = 0", "= ques[i] for i, pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score = scores[i] get_topk(corpus) print_vectors(args, para_vectors,", "args.src, args.domain, \"train/\") else: if args.is_test == 0: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain,", "sum_num_paras += num_paras[i] cum_num_lens.append(sum_num_paras) all_correct_answers[map_counter] = [] st = sum(num_paras[:i]) for j in", "open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.test_file_name), \"rb\") all_test_exs = pickle.load(fin) fin.close() logger.info(\"Loading done!\") logger.info(\"Num", "= labels all_para_vectors = np.stack(all_para_vectors) all_question_vectors = np.stack(all_question_vectors) assert all_para_vectors.shape[0] == cum_num_lens[-1] assert", "1 for i, no_paras in enumerate(ranked_para_ids): if labels[counter + no_paras ] ==1: if", "logger.info('top-1: {:2.4f}, top-3: {:2.4f}, top-5: {:2.4f}'.format( (top_1 * 1.0 / total_num_questions), (top_3 *", "None parameters = ret.get_trainable_params() if args.optimizer == 'sgd': optimizer = optim.SGD(parameters, args.learning_rate, momentum=args.momentum,", "LSTMRetriever(args, word_dict, feature_dict) # -------------------------------------------------------------------------- # TRAIN/VALID LOOP # -------------------------------------------------------------------------- # train parameters", "word_dict, feature_dict = init_from_scratch(args, all_train_exs) else: ret_model, optimizer, word_dict, feature_dict = init_from_checkpoint(args) #", "qid not in question_vectors: question_vectors[qid] = ques[i] for i, pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score", "if i <= 0: top_1 += 1 break counter += num_paras[q_counter] logger.info('Accuracy of", "test_loader = make_data_loader(args, all_test_exs) if args.eval_only: logger.info(\"Saving dev paragraph vectors\") save_vectors(args, ret_model, all_dev_exs,", "not 
train_time else RandomSampler(dataset) loader = torch.utils.data.DataLoader( dataset, batch_size=args.batch_size, sampler=sampler, num_workers=args.data_workers, collate_fn=vector.batchify(args, args.para_mode,", "False top_1 = 0 top_3 = 0 top_5 = 0 total_num_questions = 0", "\"document\", all_para_vectors) np.save(OUT_DIR + \"question\", all_question_vectors) np.save(OUT_DIR + \"all_cumlen\", cum_num_lens) def save_vectors(args, ret_model,", "when evaluated on the annotated dev set.') logger.info('top-1: {:2.4f}, top-3: {:2.4f}, top-5: {:2.4f}'.format(", "+ 'map.json', 'w')) json.dump(all_correct_ans, open(OUT_DIR + 'correct_paras.json', 'w')) all_cumlen = np.array(cum_num_lens) np.save(OUT_DIR +", "(y_num_occurrences > 0).float() labels = labels.data.numpy() scores = scores.cpu().data.numpy() scores = scores.reshape((-1)) if", "weight_decay=args.weight_decay) else: raise RuntimeError('Unsupported optimizer: %s' % args.optimizer) else: pass return ret, optimizer,", "paragraph vectors\") save_vectors(args, ret_model, all_test_exs, test_loader, verified_dev_loader=None) else: get_topk_tfidf(all_dev_exs) for epoch in range(args.num_epochs):", "labels = (y_num_occurrences > 0).float() labels = labels.cuda() # BCE logits loss batch_para_loss", "if args.test == 1: fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.test_file_name), \"rb\") all_test_exs", "para_mode=args.para_mode, train_time=train_time ) sampler = SequentialSampler(dataset) if not train_time else RandomSampler(dataset) loader =", "json.dump(all_correct_answers, open(OUT_DIR + 'correct_paras.json', 'w')) all_cumlen = np.array(cum_num_lens) np.save(OUT_DIR + \"document\", documents) np.save(OUT_DIR", "from torch.autograd import Variable import torch.optim as optim import torch.nn.functional as F from", "logging.StreamHandler() console.setFormatter(fmt) logger.addHandler(console) if args.log_file: if args.checkpoint: logfile = logging.FileHandler(args.log_file, 'a') else: logfile", 
"= [] all_para_vectors = [] qid2idx = {} cum_num_lens = [] all_correct_ans =", "= pickle.load(fin) fin.close() if args.test == 1: fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain,", "= scores.reshape((-1)) if save_scores: for i, pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score = scores[i] scores", "logger.info(\"Loading done!\") logger.info(\"Num train examples {}\".format(len(all_train_exs.paragraphs))) logger.info(\"Num dev examples {}\".format(len(all_dev_exs.paragraphs))) if args.test ==", "logger = logging.getLogger() global_timer = utils.Timer() stats = {'timer': global_timer, 'epoch': 0, 'best_valid':", "assert all_question_vectors.shape[0] == len(all_correct_ans) ## saving code if train: OUT_DIR = os.path.join(args.save_dir, args.src,", "{} | iter={}/{} | para loss = {:2.4f}'.format( stats['epoch'], idx, len(train_loader), para_loss.avg)) para_loss.reset()", "Set random state np.random.seed(args.random_seed) torch.manual_seed(args.random_seed) if args.cuda: torch.cuda.manual_seed(args.random_seed) # Set logging logger.setLevel(logging.INFO) fmt", "] ==1: if i <= 4: top_5 += 1 if i <= 2:", "logging import shutil from tqdm import tqdm from torch.autograd import Variable import torch.optim", "= 0 for qid in corpus.questions: para_scores = [(corpus.paragraphs[pid].tfidf_score, corpus.paragraphs[pid].ans_occurance) for pid in", "all_test_exs, test_loader, verified_dev_loader=None) else: get_topk_tfidf(all_dev_exs) for epoch in range(args.num_epochs): stats['epoch'] = epoch train_binary_classification(args,", "optimizer = optim.Adamax(parameters, weight_decay=args.weight_decay) elif args.optimizer == 'nag': optimizer = NAG(parameters, args.learning_rate, momentum=args.momentum,", "# -------------------------------------------------------------------------- # TRAIN/VALID LOOP # -------------------------------------------------------------------------- # train parameters = ret.get_trainable_params() optimizer", "> 0: top1 += 1 if 
sum([ans[1] for ans in sorted_para_scores[:3]]) > 0:", "param values ret.model.load_state_dict(model['state_dict']['para_clf']) optimizer = None parameters = ret.get_trainable_params() if args.optimizer == 'sgd':", "cum_num_lens) def save_vectors(args, ret_model, corpus, data_loader, verified_dev_loader=None, save_scores = True, train=False, test=False): total_exs", "question_vectors, corpus, train, test) def get_topk(corpus): top1 = 0 top3 = 0 top5", "continue inputs = [e if e is None or type(e) != type(ex[0]) else", "setup environment args = config.get_args() # Set cuda args.cuda = not args.no_cuda and", "numpy as np import json import os import pickle import sys import logging", "args.feature_dict = None params['config'] = vars(args) if epoch: params['epoch'] = epoch try: torch.save(params,", "SequentialSampler(dataset) if not train_time else RandomSampler(dataset) loader = torch.utils.data.DataLoader( dataset, batch_size=args.batch_size, sampler=sampler, num_workers=args.data_workers,", "from model import utils, data, vector from model.retriever import LSTMRetriever from multi_corpus import", "len(cum_num_lens) assert all_question_vectors.shape[0] == len(qid2idx) assert all_question_vectors.shape[0] == len(all_correct_ans) ## saving code if", "= labels.cuda() # BCE logits loss batch_para_loss = F.binary_cross_entropy_with_logits(scores.squeeze(1), labels) optimizer.zero_grad() batch_para_loss.backward() torch.nn.utils.clip_grad_norm(ret_model.get_trainable_params(),", "(top_1 * 1.0 / total_num_questions), (top_3 * 1.0 / total_num_questions), (top_5 * 1.0", "config.get_args() # Set cuda args.cuda = not args.no_cuda and torch.cuda.is_available() if args.cuda: torch.cuda.set_device(args.gpu)", "i, no_paras in enumerate(ranked_para_ids): if labels[counter + no_paras ] ==1: if i <=", "+ \"document\", all_para_vectors) np.save(OUT_DIR + \"question\", all_question_vectors) np.save(OUT_DIR + \"all_cumlen\", cum_num_lens) def save_vectors(args,", "ret_model, optimizer, 
train_loader, verified_dev_loader=None) logger.info('checkpointing model at {}'.format(args.model_file)) ## check pointing## save(args, ret_model.model,", "total_exs = 0 args.train_time = False ret_model.model.eval() accuracy = 0.0 for idx, ex", "console = logging.StreamHandler() console.setFormatter(fmt) logger.addHandler(console) if args.log_file: if args.checkpoint: logfile = logging.FileHandler(args.log_file, 'a')", "values ret.model.load_state_dict(model['state_dict']['para_clf']) optimizer = None parameters = ret.get_trainable_params() if args.optimizer == 'sgd': optimizer", "= make_data_loader(args, all_train_exs, train_time=False) if args.eval_only else make_data_loader(args, all_train_exs, train_time=True) dev_loader = make_data_loader(args,", "dev paragraph vectors\") save_vectors(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None) logger.info(\"Saving train paragraph vectors\") save_vectors(args,", "NAG(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) else: raise RuntimeError('Unsupported optimizer: %s' % args.optimizer) else: pass", "= logging.FileHandler(args.log_file, 'a') else: logfile = logging.FileHandler(args.log_file, 'w') logfile.setFormatter(fmt) logger.addHandler(logfile) logger.info('[ COMMAND: %s", "args.model_file+\".ckpt\", epoch=stats['epoch']) logger.info(\"Evaluating on the full dev set....\") top1 = eval_binary_classification(args, ret_model, all_dev_exs,", "} args.word_dict = None args.feature_dict = None params['config'] = vars(args) if epoch: params['epoch']", "sum(num_paras[:i]) for j in range(num_paras[i]): if labels[st+j] == 1: all_correct_answers[map_counter].append(j) ### Test case:", "* 1.0 / total_num_questions), (top_3 * 1.0 / total_num_questions), (top_5 * 1.0 /", "= \"/iesl/canvas/sdhuliawala/vectors_web/dev/\" if not os.path.exists(OUT_DIR): os.mkdir(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.mkdir(OUT_DIR) #Test cases assert", "if __name__ == 
'__main__': # MODEL logger.info('-' * 100) # Parse cmdline args", "import torch.nn.functional as F from torch.utils.data.sampler import RandomSampler import config from model import", "enumerate(corpus.questions): labels = [] all_question_vectors.append(question_vectors[qid]) qid2idx[qid] = question_i cum_num_len += len(corpus.questions[qid].pids) cum_num_lens.append(cum_num_len) for", "ret_model.score_paras(*ret_input) y_num_occurrences = Variable(ex[-2]) labels = (y_num_occurrences > 0).float() labels = labels.cuda() #", "else: logfile = logging.FileHandler(args.log_file, 'w') logfile.setFormatter(fmt) logger.addHandler(logfile) logger.info('[ COMMAND: %s ]' % '", "args.feature_dict } args.word_dict = None args.feature_dict = None params['config'] = vars(args) if epoch:", "all_train_exs, train_time=True) dev_loader = make_data_loader(args, all_dev_exs) if args.test: test_loader = make_data_loader(args, all_test_exs) if", "% args.optimizer) else: pass return ret, optimizer, word_dict, feature_dict def train_binary_classification(args, ret_model, optimizer,", "in corpus.questions: para_scores = [(corpus.paragraphs[pid].tfidf_score, corpus.paragraphs[pid].ans_occurance) for pid in corpus.questions[qid].pids] sorted_para_scores = sorted(para_scores,", "args.small == 1: args.test = 0 if args.small == 1: args.train_file_name = args.train_file_name", "utils.build_word_dict(args, train_exs) if word_dict is not None: args.vocab_size = len(word_dict) args.embedding_dim_orig = args.embedding_dim", "0 cum_num_lens = [] qid2idx = {} sum_num_paras = 0 all_correct_answers = {}", "+= num_paras[q_counter] logger.info('Accuracy of para classifier when evaluated on the annotated dev set.')", "== len(qid2idx) assert len(cum_num_lens) == len(all_correct_answers) json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w')) json.dump(all_correct_answers, open(OUT_DIR", "set....\") top1 = eval_binary_classification(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None) if 
stats['best_acc'] < top1: stats['best_acc']", "make_data_loader(args, all_test_exs) if args.eval_only: logger.info(\"Saving dev paragraph vectors\") save_vectors(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None)", "'best_verified_acc': 0} def make_data_loader(args, corpus, train_time=False): dataset = data.MultiCorpusDataset( args, corpus, args.word_dict, args.feature_dict,", "#Test cases assert cum_num_lens[-1] == documents.shape[0] assert questions.shape[0] == documents.shape[0] assert len(cum_num_lens) ==", ") sampler = SequentialSampler(dataset) if not train_time else RandomSampler(dataset) loader = torch.utils.data.DataLoader( dataset,", "= [*inputs[:4]] total_exs += ex[0].size(0) scores, _, _ = ret_model.score_paras(*ret_input) scores = F.sigmoid(scores)", "try: torch.save(params, filename) # bad hack for not saving dictionary twice args.word_dict =", "all_correct_answers = {} for ex_counter, ex in tqdm(enumerate(data_loader)): ret_input = [*ex] y_num_occurrences =", "all_test_exs) if args.eval_only: logger.info(\"Saving dev paragraph vectors\") save_vectors(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None) logger.info(\"Saving", "in ex[:]] ret_input = [*inputs[:4]] total_exs += ex[0].size(0) scores, doc, ques = ret_model.score_paras(*ret_input)", "epoch in range(args.num_epochs): stats['epoch'] = epoch train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None) logger.info('checkpointing model", "at {}'.format(args.model_file)) logger.info(\"Logs saved at {}\".format(args.log_file)) save(args, ret_model.model, optimizer, args.model_file, epoch=stats['epoch']) if __name__", "== '__main__': # MODEL logger.info('-' * 100) # Parse cmdline args and setup", "{} cum_num_len = 0 for question_i, qid in enumerate(corpus.questions): labels = [] all_question_vectors.append(question_vectors[qid])", "ex[:]] ret_input = [*inputs[:4]] total_exs += ex[0].size(0) scores, _, _ = 
ret_model.score_paras(*ret_input) scores", "TRAIN/VALID LOOP # -------------------------------------------------------------------------- # train parameters = ret.get_trainable_params() optimizer = None if", "Saving failed... continuing anyway. ]') # ------------------------------------------------------------------------------ # Main. # ------------------------------------------------------------------------------ def main(args):", "args.cuda = not args.no_cuda and torch.cuda.is_available() if args.cuda: torch.cuda.set_device(args.gpu) # Set random state", "args.word_dict = params['word_dict'] args.feature_dict = params['feature_dict'] except BaseException: logger.warn('[ WARN: Saving failed... continuing", "if epoch: params['epoch'] = epoch try: torch.save(params, filename) # bad hack for not", "= word_dict args.feature_dict = feature_dict ret = LSTMRetriever(args, word_dict, feature_dict) # load saved", "all_correct_ans[qid] = labels all_para_vectors = np.stack(all_para_vectors) all_question_vectors = np.stack(all_question_vectors) assert all_para_vectors.shape[0] == cum_num_lens[-1]", "idx, len(train_loader), para_loss.avg)) para_loss.reset() def eval_binary_classification(args, ret_model, corpus, dev_loader, verified_dev_loader=None, save_scores = True):", "sorted_para_scores[0][1] > 0: top1 += 1 if sum([ans[1] for ans in sorted_para_scores[:3]]) >", "assert len(all_correct_answers[map_counter]) == sum(labels.data.numpy()[st: st + num_paras[i]]) map_counter += 1 counter = 0", "[(corpus.paragraphs[pid].tfidf_score, corpus.paragraphs[pid].ans_occurance) for pid in corpus.questions[qid].pids] sorted_para_scores = sorted(para_scores, key=lambda x: x[0]) #", "{} sum_num_paras = 0 all_correct_answers = {} for ex_counter, ex in tqdm(enumerate(data_loader)): ret_input", "in corpus.questions: para_scores = [(corpus.paragraphs[pid].model_score,corpus.paragraphs[pid].ans_occurance ) for pid in corpus.questions[qid].pids] sorted_para_scores = sorted(para_scores,", 
"'best_verified_valid': 0, 'best_acc': 0, 'best_verified_acc': 0} def make_data_loader(args, corpus, train_time=False): dataset = data.MultiCorpusDataset(", "top3/len(corpus.questions) top5 = top5/len(corpus.questions) logger.info('top1 = {}, top3 = {}, top5 = {}", "\"document\", documents) np.save(OUT_DIR + \"question\", questions) np.save(OUT_DIR + \"all_cumlen\", all_cumlen) return (top_1 *", "failed... continuing anyway. ]') # ------------------------------------------------------------------------------ # Main. # ------------------------------------------------------------------------------ def main(args): #", "[(corpus.paragraphs[pid].model_score,corpus.paragraphs[pid].ans_occurance ) for pid in corpus.questions[qid].pids] sorted_para_scores = sorted(para_scores, key=lambda x: x[0], reverse=True)", "= F.binary_cross_entropy_with_logits(scores.squeeze(1), labels) optimizer.zero_grad() batch_para_loss.backward() torch.nn.utils.clip_grad_norm(ret_model.get_trainable_params(), 2.0) optimizer.step() para_loss.update(batch_para_loss.data.item()) if math.isnan(para_loss.avg): import pdb", "'epoch': 0, 'best_valid': 0, 'best_verified_valid': 0, 'best_acc': 0, 'best_verified_acc': 0} def make_data_loader(args, corpus,", "train_time=False) if args.eval_only else make_data_loader(args, all_train_exs, train_time=True) dev_loader = make_data_loader(args, all_dev_exs) if args.test:", "top5 = {} '.format(top1 / len(corpus.questions), top3 / len(corpus.questions), top5 / len(corpus.questions))) def", "args.test_file_name), \"rb\") all_test_exs = pickle.load(fin) fin.close() logger.info(\"Loading done!\") logger.info(\"Num train examples {}\".format(len(all_train_exs.paragraphs))) logger.info(\"Num", "logger.info(\"Making data loaders...\") if word_dict == None: args.word_dict = utils.build_word_dict(args, (all_train_exs, all_dev_exs)) word_dict", "ret, optimizer, word_dict, feature_dict def init_from_scratch(args, train_exs): logger.info('Initializing model from scratch') 
word_dict =", "ques.cpu().data.numpy() if ex_counter == 0: documents = docs questions = ques else: documents", "len(cum_num_lens) == len(all_correct_answers) json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w')) json.dump(all_correct_answers, open(OUT_DIR + 'correct_paras.json', 'w'))", "print_vectors(args, para_vectors, question_vectors, corpus, train, test) def get_topk(corpus): top1 = 0 top3 =", "loader = torch.utils.data.DataLoader( dataset, batch_size=args.batch_size, sampler=sampler, num_workers=args.data_workers, collate_fn=vector.batchify(args, args.para_mode, train_time=train_time), pin_memory=True ) return", "import SequentialSampler, RandomSampler import math logger = logging.getLogger() global_timer = utils.Timer() stats =", "all_train_exs, train_time=False) if args.eval_only else make_data_loader(args, all_train_exs, train_time=True) dev_loader = make_data_loader(args, all_dev_exs) if", "torch import numpy as np import json import os import pickle import sys", "pdb pdb.set_trace() if idx % 25 == 0 and idx > 0: logger.info('Epoch", "= { 'state_dict': { 'para_clf': model.state_dict(), 'optimizer': optimizer.state_dict() }, 'word_dict': args.word_dict, 'feature_dict': args.feature_dict", "import RandomSampler import config from model import utils, data, vector from model.retriever import", "tqdm from torch.autograd import Variable import torch.optim as optim import torch.nn.functional as F", "feature_dict = init_from_checkpoint(args) # make data loader logger.info(\"Making data loaders...\") if word_dict ==", "utils.AverageMeter() ret_model.model.train() for idx, ex in enumerate(train_loader): if ex is None: continue inputs", "= 0 for qid in corpus.questions: para_scores = [(corpus.paragraphs[pid].model_score,corpus.paragraphs[pid].ans_occurance ) for pid in", "= False ret_model.model.eval() accuracy = 0.0 for idx, ex in enumerate(tqdm(dev_loader)): if ex", "# Set cuda args.cuda = not args.no_cuda and torch.cuda.is_available() if args.cuda: 
torch.cuda.set_device(args.gpu) #", "= data.MultiCorpusDataset( args, corpus, args.word_dict, args.feature_dict, single_answer=False, para_mode=args.para_mode, train_time=train_time ) sampler = SequentialSampler(dataset)", "+= a.sum() logger.info('Eval accuracy = {} '.format(accuracy/total_exs)) top1 = get_topk(corpus) return top1 def", "test paragraph vectors\") save_vectors(args, ret_model, all_test_exs, test_loader, verified_dev_loader=None) else: get_topk_tfidf(all_dev_exs) for epoch in", "eval_on_train_set=False): args.train_time = False top_1 = 0 top_3 = 0 top_5 = 0", "torch.utils.data.sampler import RandomSampler import config from model import utils, data, vector from model.retriever", "os.makedirs(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.makedirs(OUT_DIR) json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w')) json.dump(all_correct_ans, open(OUT_DIR +", "train, test) def get_topk(corpus): top1 = 0 top3 = 0 top5 = 0", "data.MultiCorpusDataset( args, corpus, args.word_dict, args.feature_dict, single_answer=False, para_mode=args.para_mode, train_time=train_time ) sampler = SequentialSampler(dataset) if", "'w')) json.dump(all_correct_answers, open(OUT_DIR + 'correct_paras.json', 'w')) all_cumlen = np.array(cum_num_lens) np.save(OUT_DIR + \"document\", documents)", "enumerate(ex[-1]): para_vectors[pid] = doc[i] for i, qid in enumerate([corpus.paragraphs[pid].qid for pid in ex[-1]]):", "if e is None or type(e) != type(ex[0]) else Variable(e.cuda(async=True)) for e in", "num_paras = ex[1] qids = ex[-1] if args.save_para_clf_output: docs = docs.cpu().data.numpy() ques =", "[*inputs[:4]] scores, _, _ = ret_model.score_paras(*ret_input) y_num_occurrences = Variable(ex[-2]) labels = (y_num_occurrences >", "torch.nn.functional as F from torch.utils.data.sampler import RandomSampler import config from model import utils,", "logger.info('-' * 100) logger.info('CONFIG:\\n%s' % json.dumps(vars(args), indent=4, sort_keys=True)) # small can't test if", "> 
0.5 a = scores == labels accuracy += a.sum() logger.info('Eval accuracy =", "= {} '.format(accuracy/total_exs)) top1 = get_topk(corpus) return top1 def print_vectors(args, para_vectors, question_vectors, corpus,", "os.path.join(args.save_dir, args.src, args.domain, \"train/\") else: if args.is_test == 0: OUT_DIR = os.path.join(args.save_dir, args.src,", "topk_paras, docs, ques = model.return_topk(5,*ret_input) except RuntimeError: import pdb pdb.set_trace() num_paras = ex[1]", "\"rb\") all_train_exs = pickle.load(fin) fin.close() fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.dev_file_name), \"rb\")", "1: fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.test_file_name), \"rb\") all_test_exs = pickle.load(fin) fin.close()", "i, qid in enumerate(qids): qid2idx[qid] = map_counter sum_num_paras += num_paras[i] cum_num_lens.append(sum_num_paras) all_correct_answers[map_counter] =", "args.test_file_name + \".pkl\" logger.info(\"Loading pickle files\") fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.train_file_name),", "%I:%M:%S %p') console = logging.StreamHandler() console.setFormatter(fmt) logger.addHandler(console) if args.log_file: if args.checkpoint: logfile =", "top1 def print_vectors(args, para_vectors, question_vectors, corpus, train=False, test=False): all_question_vectors = [] all_para_vectors =", "pid in enumerate(corpus.questions[qid].pids): if corpus.paragraphs[pid].ans_occurance > 0: labels.append(para_i) all_para_vectors.append(para_vectors[pid]) all_correct_ans[qid] = labels all_para_vectors", "\"rb\") all_test_exs = pickle.load(fin) fin.close() logger.info(\"Loading done!\") logger.info(\"Num train examples {}\".format(len(all_train_exs.paragraphs))) logger.info(\"Num dev", "eval_binary_classification(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None) if stats['best_acc'] < top1: stats['best_acc'] = top1 logger.info('Best", "raise 
RuntimeError('Unsupported optimizer: %s' % args.optimizer) optimizer.load_state_dict(model['state_dict']['optimizer']) logger.info('Model loaded...') return ret, optimizer, word_dict,", "else: if args.is_test == 0: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"dev/\") else: OUT_DIR", "if idx % 25 == 0 and idx > 0: logger.info('Epoch = {}", "% args.optimizer) optimizer.load_state_dict(model['state_dict']['optimizer']) logger.info('Model loaded...') return ret, optimizer, word_dict, feature_dict def init_from_scratch(args, train_exs):", "= scores[i] get_topk(corpus) print_vectors(args, para_vectors, question_vectors, corpus, train, test) def get_topk(corpus): top1 =", "optimizer, word_dict, feature_dict def train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None): args.train_time = True para_loss", "1 counter = 0 for q_counter, ranked_para_ids in enumerate(topk_paras): total_num_questions += 1 for", "parameters = ret.get_trainable_params() optimizer = None if parameters is not None and len(parameters)", "enumerate(ranked_para_ids): if labels[counter + no_paras ] ==1: if i <= 4: top_5 +=", "= {} sum_num_paras = 0 all_correct_answers = {} for ex_counter, ex in tqdm(enumerate(data_loader)):", "= None # create or get vocab word_dict = utils.build_word_dict(args, train_exs) if word_dict", "pickle.load(fin) fin.close() fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.dev_file_name), \"rb\") all_dev_exs = pickle.load(fin)", "word_dict, feature_dict = init_from_checkpoint(args) # make data loader logger.info(\"Making data loaders...\") if word_dict", "np.save(OUT_DIR + \"question\", all_question_vectors) np.save(OUT_DIR + \"all_cumlen\", cum_num_lens) def save_vectors(args, ret_model, corpus, data_loader,", "(top_3 * 1.0 / total_num_questions), (top_5 * 1.0 / total_num_questions) def save(args, model,", "counter += num_paras[q_counter] logger.info('Accuracy of para classifier when evaluated 
on the annotated dev", "saved at {}\".format(args.log_file)) save(args, ret_model.model, optimizer, args.model_file, epoch=stats['epoch']) if __name__ == '__main__': #", "'sgd': optimizer = optim.SGD(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) elif args.optimizer == 'adamax': optimizer =", "type(e) != type(ex[0]) else Variable(e.cuda(async=True)) for e in ex[:]] ret_input = [*inputs[:4]] total_exs", "== documents.shape[0] assert questions.shape[0] == documents.shape[0] assert len(cum_num_lens) == len(qid2idx) assert len(cum_num_lens) ==", "labels.append(para_i) all_para_vectors.append(para_vectors[pid]) all_correct_ans[qid] = labels all_para_vectors = np.stack(all_para_vectors) all_question_vectors = np.stack(all_question_vectors) assert all_para_vectors.shape[0]", "math logger = logging.getLogger() global_timer = utils.Timer() stats = {'timer': global_timer, 'epoch': 0,", "if qid not in question_vectors: question_vectors[qid] = ques[i] for i, pid in enumerate(ex[-1]):", "(y_num_occurrences > 0) try: topk_paras, docs, ques = model.return_topk(5,*ret_input) except RuntimeError: import pdb", "cum_num_lens.append(sum_num_paras) all_correct_answers[map_counter] = [] st = sum(num_paras[:i]) for j in range(num_paras[i]): if labels[st+j]", "para_vectors, question_vectors, corpus, train=False, test=False): all_question_vectors = [] all_para_vectors = [] qid2idx =", "'map.json', 'w')) json.dump(all_correct_answers, open(OUT_DIR + 'correct_paras.json', 'w')) all_cumlen = np.array(cum_num_lens) np.save(OUT_DIR + \"document\",", "if not os.path.exists(OUT_DIR): os.mkdir(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.mkdir(OUT_DIR) #Test cases assert cum_num_lens[-1] ==", "assert questions.shape[0] == documents.shape[0] assert len(cum_num_lens) == len(qid2idx) assert len(cum_num_lens) == len(all_correct_answers) json.dump(qid2idx,", "sorted(para_scores, key=lambda x: x[0], reverse=True) if sorted_para_scores[0][1] > 0: 
top1 += 1 if", "_, _ = ret_model.score_paras(*ret_input) scores = F.sigmoid(scores) y_num_occurrences = Variable(ex[-2]) labels = (y_num_occurrences", "1.0 / total_num_questions), (top_5 * 1.0 / total_num_questions))) ## saving code if args.save_para_clf_output:", "= {} cum_num_lens = [] all_correct_ans = {} cum_num_len = 0 for question_i,", "sort_keys=True)) # small can't test if args.small == 1: args.test = 0 if", "if not train_time else RandomSampler(dataset) loader = torch.utils.data.DataLoader( dataset, batch_size=args.batch_size, sampler=sampler, num_workers=args.data_workers, collate_fn=vector.batchify(args,", "### create map and cum_num_lens for i, qid in enumerate(qids): qid2idx[qid] = map_counter", "1.0 / total_num_questions), (top_5 * 1.0 / total_num_questions) def save(args, model, optimizer, filename,", "[*inputs[:4]] total_exs += ex[0].size(0) scores, _, _ = ret_model.score_paras(*ret_input) scores = F.sigmoid(scores) y_num_occurrences", "at {}'.format(args.model_file)) ## check pointing## save(args, ret_model.model, optimizer, args.model_file+\".ckpt\", epoch=stats['epoch']) logger.info(\"Evaluating on the", "def train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None): args.train_time = True para_loss = utils.AverageMeter() ret_model.model.train()", "= 0 top5 = 0 for qid in corpus.questions: para_scores = [(corpus.paragraphs[pid].model_score,corpus.paragraphs[pid].ans_occurance )", "loader def init_from_checkpoint(args): logger.info('Loading model from saved checkpoint {}'.format(args.pretrained)) model = torch.load(args.pretrained) word_dict", "= epoch try: torch.save(params, filename) # bad hack for not saving dictionary twice", "len(parameters) > 0: if args.optimizer == 'sgd': optimizer = optim.SGD(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)", "Variable(e.cuda(async=True)) for e in ex[:]] ret_input = [*inputs[:4]] scores, _, _ = ret_model.score_paras(*ret_input)", 
"\"_small\" if args.test == 1: args.test_file_name = args.test_file_name + \"_small\" args.train_file_name = args.train_file_name", "save_vectors(args, ret_model, all_train_exs, train_loader, verified_dev_loader=None, train=True) if args.test: args.is_test = 1 logger.info(\"Saving test", "pdb # pdb.set_trace() if sorted_para_scores[0][1] > 0: top1 += 1 if sum([ans[1] for", "dev_loader = make_data_loader(args, all_dev_exs) if args.test: test_loader = make_data_loader(args, all_test_exs) if args.eval_only: logger.info(\"Saving", "get_topk(corpus): top1 = 0 top3 = 0 top5 = 0 for qid in", "train=False, test=False): all_question_vectors = [] all_para_vectors = [] qid2idx = {} cum_num_lens =", "para_loss = utils.AverageMeter() ret_model.model.train() for idx, ex in enumerate(train_loader): if ex is None:", "if word_dict is not None: args.vocab_size = len(word_dict) args.embedding_dim_orig = args.embedding_dim args.word_dict =", "optimizer = optim.SGD(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) elif args.optimizer == 'adamax': optimizer = optim.Adamax(parameters,", "train_loader = make_data_loader(args, all_train_exs, train_time=False) if args.eval_only else make_data_loader(args, all_train_exs, train_time=True) dev_loader =", "2.0) optimizer.step() para_loss.update(batch_para_loss.data.item()) if math.isnan(para_loss.avg): import pdb pdb.set_trace() if idx % 25 ==", "1 if sum([ans[1] for ans in sorted_para_scores[:5]]) > 0: top5 += 1 logger.info(", "= {}, top3 = {}, top5 = {} '.format(top1, top3 ,top5 )) return", "scores, _, _ = ret_model.score_paras(*ret_input) y_num_occurrences = Variable(ex[-2]) labels = (y_num_occurrences > 0).float()", "# Set random state np.random.seed(args.random_seed) torch.manual_seed(args.random_seed) if args.cuda: torch.cuda.manual_seed(args.random_seed) # Set logging logger.setLevel(logging.INFO)", "not None and len(parameters) > 0: if args.optimizer == 'sgd': optimizer = optim.SGD(parameters,", "> 
0: top5 += 1 top1 = top1/len(corpus.questions) top3 = top3/len(corpus.questions) top5 =", "utils.Timer() stats = {'timer': global_timer, 'epoch': 0, 'best_valid': 0, 'best_verified_valid': 0, 'best_acc': 0,", "qid2idx[qid] = map_counter sum_num_paras += num_paras[i] cum_num_lens.append(sum_num_paras) all_correct_answers[map_counter] = [] st = sum(num_paras[:i])", "% json.dumps(vars(args), indent=4, sort_keys=True)) # small can't test if args.small == 1: args.test", "F.binary_cross_entropy_with_logits(scores.squeeze(1), labels) optimizer.zero_grad() batch_para_loss.backward() torch.nn.utils.clip_grad_norm(ret_model.get_trainable_params(), 2.0) optimizer.step() para_loss.update(batch_para_loss.data.item()) if math.isnan(para_loss.avg): import pdb pdb.set_trace()", "return ret, optimizer, word_dict, feature_dict def train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None): args.train_time =", "ans in sorted_para_scores[:3]]) > 0: top3 += 1 if sum([ans[1] for ans in", "ret_input = [*inputs[:4]] total_exs += ex[0].size(0) scores, _, _ = ret_model.score_paras(*ret_input) scores =", "fin.close() fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.dev_file_name), \"rb\") all_dev_exs = pickle.load(fin) fin.close()", "doc[i] for i, qid in enumerate([corpus.paragraphs[pid].qid for pid in ex[-1]]): if qid not", "bad hack for not saving dictionary twice args.word_dict = params['word_dict'] args.feature_dict = params['feature_dict']", "if sum([ans[1] for ans in sorted_para_scores[:5]]) > 0: top5 += 1 logger.info( 'top1", "fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.dev_file_name), \"rb\") all_dev_exs = pickle.load(fin) fin.close() if", "= logging.FileHandler(args.log_file, 'w') logfile.setFormatter(fmt) logger.addHandler(logfile) logger.info('[ COMMAND: %s ]' % ' '.join(sys.argv)) #", "logger.info('CONFIG:\\n%s' % json.dumps(vars(args), indent=4, sort_keys=True)) # small can't test 
if args.small == 1:", "'optimizer': optimizer.state_dict() }, 'word_dict': args.word_dict, 'feature_dict': args.feature_dict } args.word_dict = None args.feature_dict =", "args.eval_only else make_data_loader(args, all_train_exs, train_time=True) dev_loader = make_data_loader(args, all_dev_exs) if args.test: test_loader =", "top3 = top3/len(corpus.questions) top5 = top5/len(corpus.questions) logger.info('top1 = {}, top3 = {}, top5", "from tqdm import tqdm from torch.autograd import Variable import torch.optim as optim import", "def eval_binary_classification(args, ret_model, corpus, dev_loader, verified_dev_loader=None, save_scores = True): total_exs = 0 args.train_time", "= top3/len(corpus.questions) top5 = top5/len(corpus.questions) logger.info('top1 = {}, top3 = {}, top5 =", "pickle.load(fin) fin.close() logger.info(\"Loading done!\") logger.info(\"Num train examples {}\".format(len(all_train_exs.paragraphs))) logger.info(\"Num dev examples {}\".format(len(all_dev_exs.paragraphs))) if", "global_timer, 'epoch': 0, 'best_valid': 0, 'best_verified_valid': 0, 'best_acc': 0, 'best_verified_acc': 0} def make_data_loader(args,", "ret_model, all_test_exs, test_loader, verified_dev_loader=None) else: get_topk_tfidf(all_dev_exs) for epoch in range(args.num_epochs): stats['epoch'] = epoch", "stats['best_acc'] = top1 logger.info('Best accuracy {}'.format(stats['best_acc'])) logger.info('Saving model at {}'.format(args.model_file)) logger.info(\"Logs saved at", "if labels[st+j] == 1: all_correct_answers[map_counter].append(j) ### Test case: assert len(all_correct_answers[map_counter]) == sum(labels.data.numpy()[st: st", "dev examples {}\".format(len(all_dev_exs.paragraphs))) if args.test == 1: logger.info(\"Num test examples {}\".format(len(all_test_exs.paragraphs))) if args.pretrained", "in range(num_paras[i]): if labels[st+j] == 1: all_correct_answers[map_counter].append(j) ### Test case: assert len(all_correct_answers[map_counter]) ==", "make_data_loader(args, 
all_dev_exs) if args.test: test_loader = make_data_loader(args, all_test_exs) if args.eval_only: logger.info(\"Saving dev paragraph", "def main(args): # PRINT CONFIG logger.info('-' * 100) logger.info('CONFIG:\\n%s' % json.dumps(vars(args), indent=4, sort_keys=True))", "= make_data_loader(args, all_test_exs) if args.eval_only: logger.info(\"Saving dev paragraph vectors\") save_vectors(args, ret_model, all_dev_exs, dev_loader,", "= model['word_dict'] feature_dict = model['feature_dict'] args.vocab_size = len(word_dict) args.embedding_dim_orig = args.embedding_dim args.word_dict =", "cum_num_lens.append(cum_num_len) for para_i, pid in enumerate(corpus.questions[qid].pids): if corpus.paragraphs[pid].ans_occurance > 0: labels.append(para_i) all_para_vectors.append(para_vectors[pid]) all_correct_ans[qid]", "len(qid2idx) assert len(cum_num_lens) == len(all_correct_answers) json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w')) json.dump(all_correct_answers, open(OUT_DIR +", "pdb pdb.set_trace() num_paras = ex[1] qids = ex[-1] if args.save_para_clf_output: docs = docs.cpu().data.numpy()", "= scores[i] scores = scores > 0.5 a = scores == labels accuracy", ",top5 )) return top1 def get_topk_tfidf(corpus): top1 = 0 top3 = 0 top5", "continuing anyway. ]') # ------------------------------------------------------------------------------ # Main. 
# ------------------------------------------------------------------------------ def main(args): # PRINT", "= None if parameters is not None and len(parameters) > 0: if args.optimizer", "optimizer.state_dict() }, 'word_dict': args.word_dict, 'feature_dict': args.feature_dict } args.word_dict = None args.feature_dict = None", "1: args.test_file_name = args.test_file_name + \".pkl\" logger.info(\"Loading pickle files\") fin = open(os.path.join(args.data_dir, args.src,", "args.domain, args.test_file_name), \"rb\") all_test_exs = pickle.load(fin) fin.close() logger.info(\"Loading done!\") logger.info(\"Num train examples {}\".format(len(all_train_exs.paragraphs)))", "idx > 0: logger.info('Epoch = {} | iter={}/{} | para loss = {:2.4f}'.format(", "else: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"test/\") logger.info(\"Printing vectors at {}\".format(OUT_DIR)) if not", "all_para_vectors.append(para_vectors[pid]) all_correct_ans[qid] = labels all_para_vectors = np.stack(all_para_vectors) all_question_vectors = np.stack(all_question_vectors) assert all_para_vectors.shape[0] ==", "------------------------------------------------------------------------------ def main(args): # PRINT CONFIG logger.info('-' * 100) logger.info('CONFIG:\\n%s' % json.dumps(vars(args), indent=4,", "args.test_file_name = args.test_file_name + \".pkl\" logger.info(\"Loading pickle files\") fin = open(os.path.join(args.data_dir, args.src, \"data\",", "args.train_file_name + \".pkl\" args.dev_file_name = args.dev_file_name + \".pkl\" if args.test == 1: args.test_file_name", "on the full dev set....\") top1 = eval_binary_classification(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None) if", "'__main__': # MODEL logger.info('-' * 100) # Parse cmdline args and setup environment", "all_dev_exs, dev_loader, verified_dev_loader=None) logger.info(\"Saving train paragraph vectors\") save_vectors(args, ret_model, all_train_exs, train_loader, verified_dev_loader=None, 
train=True)", "args.word_dict, args.feature_dict, single_answer=False, para_mode=args.para_mode, train_time=train_time ) sampler = SequentialSampler(dataset) if not train_time else", "= 0 for question_i, qid in enumerate(corpus.questions): labels = [] all_question_vectors.append(question_vectors[qid]) qid2idx[qid] =", "docs.cpu().data.numpy() ques = ques.cpu().data.numpy() if ex_counter == 0: documents = docs questions =", "optimizer, args.model_file, epoch=stats['epoch']) if __name__ == '__main__': # MODEL logger.info('-' * 100) #", "documents) np.save(OUT_DIR + \"question\", questions) np.save(OUT_DIR + \"all_cumlen\", all_cumlen) return (top_1 * 1.0", "corpus.paragraphs[pid].model_score = scores[i] scores = scores > 0.5 a = scores == labels", "corpus.questions[qid].pids] sorted_para_scores = sorted(para_scores, key=lambda x: x[0]) # import pdb # pdb.set_trace() if", "top3 = {}, top5 = {} '.format(top1 / len(corpus.questions), top3 / len(corpus.questions), top5", "idx % 25 == 0 and idx > 0: logger.info('Epoch = {} |", "LSTMRetriever(args, word_dict, feature_dict) # load saved param values ret.model.load_state_dict(model['state_dict']['para_clf']) optimizer = None parameters", "in range(args.num_epochs): stats['epoch'] = epoch train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None) logger.info('checkpointing model at", "train_time else RandomSampler(dataset) loader = torch.utils.data.DataLoader( dataset, batch_size=args.batch_size, sampler=sampler, num_workers=args.data_workers, collate_fn=vector.batchify(args, args.para_mode, train_time=train_time),", "vectors\") save_vectors(args, ret_model, all_train_exs, train_loader, verified_dev_loader=None, train=True) if args.test: args.is_test = 1 logger.info(\"Saving", "get vocab word_dict = utils.build_word_dict(args, train_exs) if word_dict is not None: args.vocab_size =", "+= ex[0].size(0) scores, doc, ques = ret_model.score_paras(*ret_input) scores = scores.cpu().data.numpy() 
scores = scores.reshape((-1))", "top5 += 1 logger.info( 'top1 = {}, top3 = {}, top5 = {}", "all_test_exs = pickle.load(fin) fin.close() logger.info(\"Loading done!\") logger.info(\"Num train examples {}\".format(len(all_train_exs.paragraphs))) logger.info(\"Num dev examples", "model.retriever import LSTMRetriever from multi_corpus import MultiCorpus from torch.utils.data.sampler import SequentialSampler, RandomSampler import", "ex in enumerate(train_loader): if ex is None: continue inputs = [e if e", "if args.small == 1: args.train_file_name = args.train_file_name + \"_small\" args.dev_file_name = args.dev_file_name +", "args.dev_file_name), \"rb\") all_dev_exs = pickle.load(fin) fin.close() if args.test == 1: fin = open(os.path.join(args.data_dir,", "all_para_vectors = np.stack(all_para_vectors) all_question_vectors = np.stack(all_question_vectors) assert all_para_vectors.shape[0] == cum_num_lens[-1] assert all_question_vectors.shape[0] ==", "qid in corpus.questions: para_scores = [(corpus.paragraphs[pid].tfidf_score, corpus.paragraphs[pid].ans_occurance) for pid in corpus.questions[qid].pids] sorted_para_scores =", "= 0 all_correct_answers = {} for ex_counter, ex in tqdm(enumerate(data_loader)): ret_input = [*ex]", "= {} for ex_counter, ex in tqdm(enumerate(data_loader)): ret_input = [*ex] y_num_occurrences = ex[3]", "+= 1 top1 = top1/len(corpus.questions) top3 = top3/len(corpus.questions) top5 = top5/len(corpus.questions) logger.info('top1 =", "utils.build_word_dict(args, (all_train_exs, all_dev_exs)) word_dict = args.word_dict train_loader = make_data_loader(args, all_train_exs, train_time=False) if args.eval_only", "= \"/iesl/canvas/sdhuliawala/vectors_web/train/\" else: OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/dev/\" if not os.path.exists(OUT_DIR): os.mkdir(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True)", "args.checkpoint: logfile = logging.FileHandler(args.log_file, 'a') else: logfile = logging.FileHandler(args.log_file, 'w') 
logfile.setFormatter(fmt) logger.addHandler(logfile) logger.info('[", "= not args.no_cuda and torch.cuda.is_available() if args.cuda: torch.cuda.set_device(args.gpu) # Set random state np.random.seed(args.random_seed)", "data_loader, verified_dev_loader=None, save_scores = True, train=False, test=False): total_exs = 0 args.train_time = False", "feature_dict = None # create or get vocab word_dict = utils.build_word_dict(args, train_exs) if", "== 'adamax': optimizer = optim.Adamax(parameters, weight_decay=args.weight_decay) elif args.optimizer == 'nag': optimizer = NAG(parameters,", "if args.eval_only else make_data_loader(args, all_train_exs, train_time=True) dev_loader = make_data_loader(args, all_dev_exs) if args.test: test_loader", "cum_num_lens[-1] assert all_question_vectors.shape[0] == len(cum_num_lens) assert all_question_vectors.shape[0] == len(qid2idx) assert all_question_vectors.shape[0] == len(all_correct_ans)", "indent=4, sort_keys=True)) # small can't test if args.small == 1: args.test = 0", "args.train_file_name), \"rb\") all_train_exs = pickle.load(fin) fin.close() fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.dev_file_name),", "OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/train/\" else: OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/dev/\" if not os.path.exists(OUT_DIR): os.mkdir(OUT_DIR) else: shutil.rmtree(OUT_DIR,", "'.format(top1, top3 ,top5 )) return top1 def get_topk_tfidf(corpus): top1 = 0 top3 =", "False ret_model.model.eval() accuracy = 0.0 for idx, ex in enumerate(tqdm(dev_loader)): if ex is", "WARN: Saving failed... continuing anyway. ]') # ------------------------------------------------------------------------------ # Main. 
# ------------------------------------------------------------------------------ def", "logits loss batch_para_loss = F.binary_cross_entropy_with_logits(scores.squeeze(1), labels) optimizer.zero_grad() batch_para_loss.backward() torch.nn.utils.clip_grad_norm(ret_model.get_trainable_params(), 2.0) optimizer.step() para_loss.update(batch_para_loss.data.item()) if", "args.train_time = False ret_model.model.eval() accuracy = 0.0 for idx, ex in enumerate(tqdm(dev_loader)): if", "# load saved param values ret.model.load_state_dict(model['state_dict']['para_clf']) optimizer = None parameters = ret.get_trainable_params() if", "params['epoch'] = epoch try: torch.save(params, filename) # bad hack for not saving dictionary", "BCE logits loss batch_para_loss = F.binary_cross_entropy_with_logits(scores.squeeze(1), labels) optimizer.zero_grad() batch_para_loss.backward() torch.nn.utils.clip_grad_norm(ret_model.get_trainable_params(), 2.0) optimizer.step() para_loss.update(batch_para_loss.data.item())", "vectors\") save_vectors(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None) logger.info(\"Saving train paragraph vectors\") save_vectors(args, ret_model, all_train_exs,", "args.word_dict train_loader = make_data_loader(args, all_train_exs, train_time=False) if args.eval_only else make_data_loader(args, all_train_exs, train_time=True) dev_loader", "0: labels.append(para_i) all_para_vectors.append(para_vectors[pid]) all_correct_ans[qid] = labels all_para_vectors = np.stack(all_para_vectors) all_question_vectors = np.stack(all_question_vectors) assert", "cum_num_lens = [] qid2idx = {} sum_num_paras = 0 all_correct_answers = {} for", "'correct_paras.json', 'w')) all_cumlen = np.array(cum_num_lens) np.save(OUT_DIR + \"document\", documents) np.save(OUT_DIR + \"question\", questions)", "set.') logger.info('top-1: {:2.4f}, top-3: {:2.4f}, top-5: {:2.4f}'.format( (top_1 * 1.0 / total_num_questions), (top_3", "\"data\", args.domain, args.train_file_name), \"rb\") 
all_train_exs = pickle.load(fin) fin.close() fin = open(os.path.join(args.data_dir, args.src, \"data\",", "{}\".format(args.log_file)) save(args, ret_model.model, optimizer, args.model_file, epoch=stats['epoch']) if __name__ == '__main__': # MODEL logger.info('-'", "cum_num_lens for i, qid in enumerate(qids): qid2idx[qid] = map_counter sum_num_paras += num_paras[i] cum_num_lens.append(sum_num_paras)", "np.array(cum_num_lens) np.save(OUT_DIR + \"document\", documents) np.save(OUT_DIR + \"question\", questions) np.save(OUT_DIR + \"all_cumlen\", all_cumlen)", "ques = ques.cpu().data.numpy() if ex_counter == 0: documents = docs questions = ques", "\"all_cumlen\", cum_num_lens) def save_vectors(args, ret_model, corpus, data_loader, verified_dev_loader=None, save_scores = True, train=False, test=False):", "+ \"document\", documents) np.save(OUT_DIR + \"question\", questions) np.save(OUT_DIR + \"all_cumlen\", all_cumlen) return (top_1", "!= type(ex[0]) else Variable(e.cuda(async=True)) for e in ex[:]] ret_input = [*inputs[:4]] total_exs +=", "args.train_time = True para_loss = utils.AverageMeter() ret_model.model.train() for idx, ex in enumerate(train_loader): if", "feature_dict ret = LSTMRetriever(args, word_dict, feature_dict) # -------------------------------------------------------------------------- # TRAIN/VALID LOOP # --------------------------------------------------------------------------", "docs, ques = model.return_topk(5,*ret_input) except RuntimeError: import pdb pdb.set_trace() num_paras = ex[1] qids", "logger.info( 'top1 = {}, top3 = {}, top5 = {} '.format(top1 / len(corpus.questions),", "all_para_vectors = [] qid2idx = {} cum_num_lens = [] all_correct_ans = {} cum_num_len", "else RandomSampler(dataset) loader = torch.utils.data.DataLoader( dataset, batch_size=args.batch_size, sampler=sampler, num_workers=args.data_workers, collate_fn=vector.batchify(args, args.para_mode, train_time=train_time), pin_memory=True", "question_i, qid in 
enumerate(corpus.questions): labels = [] all_question_vectors.append(question_vectors[qid]) qid2idx[qid] = question_i cum_num_len +=", "+ no_paras ] ==1: if i <= 4: top_5 += 1 if i", "= scores.cpu().data.numpy() scores = scores.reshape((-1)) if save_scores: for i, pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score", "save_vectors(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None) logger.info(\"Saving train paragraph vectors\") save_vectors(args, ret_model, all_train_exs, train_loader,", "import pickle import sys import logging import shutil from tqdm import tqdm from", "% 25 == 0 and idx > 0: logger.info('Epoch = {} | iter={}/{}", "ans in sorted_para_scores[:5]]) > 0: top5 += 1 top1 = top1/len(corpus.questions) top3 =", "ret_model, all_train_exs, train_loader, verified_dev_loader=None, train=True) if args.test: args.is_test = 1 logger.info(\"Saving test paragraph", "os.mkdir(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.mkdir(OUT_DIR) #Test cases assert cum_num_lens[-1] == documents.shape[0] assert questions.shape[0]", "[] all_correct_ans = {} cum_num_len = 0 for question_i, qid in enumerate(corpus.questions): labels", "scores = scores.reshape((-1)) if save_scores: for i, pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score = scores[i]", "0, 'best_verified_valid': 0, 'best_acc': 0, 'best_verified_acc': 0} def make_data_loader(args, corpus, train_time=False): dataset =", "i <= 2: top_3 += 1 if i <= 0: top_1 += 1", "questions.shape[0] == documents.shape[0] assert len(cum_num_lens) == len(qid2idx) assert len(cum_num_lens) == len(all_correct_answers) json.dump(qid2idx, open(OUT_DIR", "iter={}/{} | para loss = {:2.4f}'.format( stats['epoch'], idx, len(train_loader), para_loss.avg)) para_loss.reset() def eval_binary_classification(args,", "0, 'best_acc': 0, 'best_verified_acc': 0} def make_data_loader(args, corpus, train_time=False): dataset = data.MultiCorpusDataset( args,", "return (top_1 * 1.0 / total_num_questions), 
(top_3 * 1.0 / total_num_questions), (top_5 *", "epoch=stats['epoch']) logger.info(\"Evaluating on the full dev set....\") top1 = eval_binary_classification(args, ret_model, all_dev_exs, dev_loader,", "in enumerate(tqdm(dev_loader)): if ex is None: raise BrokenPipeError inputs = [e if e", "{:2.4f}, top-5: {:2.4f}'.format( (top_1 * 1.0 / total_num_questions), (top_3 * 1.0 / total_num_questions),", "/ len(corpus.questions), top5 / len(corpus.questions))) def run_predictions(args, data_loader, model, eval_on_train_set=False): args.train_time = False", "0: if args.optimizer == 'sgd': optimizer = optim.SGD(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) elif args.optimizer", "args.no_cuda and torch.cuda.is_available() if args.cuda: torch.cuda.set_device(args.gpu) # Set random state np.random.seed(args.random_seed) torch.manual_seed(args.random_seed) if", "check pointing## save(args, ret_model.model, optimizer, args.model_file+\".ckpt\", epoch=stats['epoch']) logger.info(\"Evaluating on the full dev set....\")", "1 logger.info( 'top1 = {}, top3 = {}, top5 = {} '.format(top1 /", "pdb.set_trace() num_paras = ex[1] qids = ex[-1] if args.save_para_clf_output: docs = docs.cpu().data.numpy() ques", "# TRAIN/VALID LOOP # -------------------------------------------------------------------------- # train parameters = ret.get_trainable_params() optimizer = None", "parameters = ret.get_trainable_params() if args.optimizer == 'sgd': optimizer = optim.SGD(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)", "counter = 0 for q_counter, ranked_para_ids in enumerate(topk_paras): total_num_questions += 1 for i,", "args.word_dict = word_dict args.feature_dict = feature_dict ret = LSTMRetriever(args, word_dict, feature_dict) # load", "pid in enumerate(ex[-1]): para_vectors[pid] = doc[i] for i, qid in enumerate([corpus.paragraphs[pid].qid for pid", "top_5 = 0 total_num_questions = 0 map_counter = 0 cum_num_lens = 
[] qid2idx", "data loader logger.info(\"Making data loaders...\") if word_dict == None: args.word_dict = utils.build_word_dict(args, (all_train_exs,", "vectors at {}\".format(OUT_DIR)) if not os.path.exists(OUT_DIR): os.makedirs(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.makedirs(OUT_DIR) json.dump(qid2idx, open(OUT_DIR", "return ret, optimizer, word_dict, feature_dict def init_from_scratch(args, train_exs): logger.info('Initializing model from scratch') word_dict", "= top5/len(corpus.questions) logger.info('top1 = {}, top3 = {}, top5 = {} '.format(top1, top3", "import pdb pdb.set_trace() if idx % 25 == 0 and idx > 0:", "= [] qid2idx = {} sum_num_paras = 0 all_correct_answers = {} for ex_counter,", "\"dev/\") else: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"test/\") logger.info(\"Printing vectors at {}\".format(OUT_DIR)) if", "make_data_loader(args, all_train_exs, train_time=False) if args.eval_only else make_data_loader(args, all_train_exs, train_time=True) dev_loader = make_data_loader(args, all_dev_exs)", "evaluated on the annotated dev set.') logger.info('top-1: {:2.4f}, top-3: {:2.4f}, top-5: {:2.4f}'.format( (top_1", "examples {}\".format(len(all_dev_exs.paragraphs))) if args.test == 1: logger.info(\"Num test examples {}\".format(len(all_test_exs.paragraphs))) if args.pretrained is", "'top1 = {}, top3 = {}, top5 = {} '.format(top1 / len(corpus.questions), top3", "= model.return_topk(5,*ret_input) except RuntimeError: import pdb pdb.set_trace() num_paras = ex[1] qids = ex[-1]", "1 if sum([ans[1] for ans in sorted_para_scores[:3]]) > 0: top3 += 1 if", "optimizer, train_loader, verified_dev_loader=None): args.train_time = True para_loss = utils.AverageMeter() ret_model.model.train() for idx, ex", "ex[-1]]): if qid not in question_vectors: question_vectors[qid] = ques[i] for i, pid in", "# Parse cmdline args and setup environment args = config.get_args() # Set cuda", "logger.setLevel(logging.INFO) fmt = 
logging.Formatter('%(asctime)s: %(message)s', '%m/%d/%Y %I:%M:%S %p') console = logging.StreamHandler() console.setFormatter(fmt) logger.addHandler(console)", "_ = ret_model.score_paras(*ret_input) y_num_occurrences = Variable(ex[-2]) labels = (y_num_occurrences > 0).float() labels =", "feature_dict def train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None): args.train_time = True para_loss = utils.AverageMeter()", "sum(labels.data.numpy()[st: st + num_paras[i]]) map_counter += 1 counter = 0 for q_counter, ranked_para_ids", "scores[i] get_topk(corpus) print_vectors(args, para_vectors, question_vectors, corpus, train, test) def get_topk(corpus): top1 = 0", "qids = ex[-1] if args.save_para_clf_output: docs = docs.cpu().data.numpy() ques = ques.cpu().data.numpy() if ex_counter", "pickle import sys import logging import shutil from tqdm import tqdm from torch.autograd", "not in question_vectors: question_vectors[qid] = ques[i] for i, pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score =", "the full dev set....\") top1 = eval_binary_classification(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None) if stats['best_acc']", "top3 / len(corpus.questions), top5 / len(corpus.questions))) def run_predictions(args, data_loader, model, eval_on_train_set=False): args.train_time =", "= True): total_exs = 0 args.train_time = False ret_model.model.eval() accuracy = 0.0 for", "== 'nag': optimizer = NAG(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) else: raise RuntimeError('Unsupported optimizer: %s'", "a = scores == labels accuracy += a.sum() logger.info('Eval accuracy = {} '.format(accuracy/total_exs))", "optimizer, filename, epoch=None): params = { 'state_dict': { 'para_clf': model.state_dict(), 'optimizer': optimizer.state_dict() },", "i, pid in enumerate(ex[-1]): para_vectors[pid] = doc[i] for i, qid in enumerate([corpus.paragraphs[pid].qid for", "args.domain, \"dev/\") 
else: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"test/\") logger.info(\"Printing vectors at {}\".format(OUT_DIR))", "corpus, train, test) def get_topk(corpus): top1 = 0 top3 = 0 top5 =", "question_vectors, corpus, train=False, test=False): all_question_vectors = [] all_para_vectors = [] qid2idx = {}", "args.cuda: torch.cuda.set_device(args.gpu) # Set random state np.random.seed(args.random_seed) torch.manual_seed(args.random_seed) if args.cuda: torch.cuda.manual_seed(args.random_seed) # Set", "0: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"dev/\") else: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain,", "np.array(cum_num_lens) np.save(OUT_DIR + \"document\", all_para_vectors) np.save(OUT_DIR + \"question\", all_question_vectors) np.save(OUT_DIR + \"all_cumlen\", cum_num_lens)", "verified_dev_loader=None) logger.info(\"Saving train paragraph vectors\") save_vectors(args, ret_model, all_train_exs, train_loader, verified_dev_loader=None, train=True) if args.test:", "'correct_paras.json', 'w')) all_cumlen = np.array(cum_num_lens) np.save(OUT_DIR + \"document\", all_para_vectors) np.save(OUT_DIR + \"question\", all_question_vectors)", "in enumerate(tqdm(data_loader)): if ex is None: raise BrokenPipeError inputs = [e if e", "\"test/\") logger.info(\"Printing vectors at {}\".format(OUT_DIR)) if not os.path.exists(OUT_DIR): os.makedirs(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.makedirs(OUT_DIR)", "shutil.rmtree(OUT_DIR, ignore_errors=True) os.makedirs(OUT_DIR) json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w')) json.dump(all_correct_ans, open(OUT_DIR + 'correct_paras.json', 'w'))", "shutil from tqdm import tqdm from torch.autograd import Variable import torch.optim as optim", "get_topk(corpus) print_vectors(args, para_vectors, question_vectors, corpus, train, test) def get_topk(corpus): top1 = 0 top3", "for i, pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score = scores[i] get_topk(corpus) 
print_vectors(args, para_vectors, question_vectors, corpus,", "= 0 total_num_questions = 0 map_counter = 0 cum_num_lens = [] qid2idx =", "for idx, ex in enumerate(tqdm(data_loader)): if ex is None: raise BrokenPipeError inputs =", "pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score = scores[i] get_topk(corpus) print_vectors(args, para_vectors, question_vectors, corpus, train, test)", "accuracy {}'.format(stats['best_acc'])) logger.info('Saving model at {}'.format(args.model_file)) logger.info(\"Logs saved at {}\".format(args.log_file)) save(args, ret_model.model, optimizer,", "enumerate(corpus.questions[qid].pids): if corpus.paragraphs[pid].ans_occurance > 0: labels.append(para_i) all_para_vectors.append(para_vectors[pid]) all_correct_ans[qid] = labels all_para_vectors = np.stack(all_para_vectors)", "0) try: topk_paras, docs, ques = model.return_topk(5,*ret_input) except RuntimeError: import pdb pdb.set_trace() num_paras", "= True para_loss = utils.AverageMeter() ret_model.model.train() for idx, ex in enumerate(train_loader): if ex", "import pdb # pdb.set_trace() if sorted_para_scores[0][1] > 0: top1 += 1 if sum([ans[1]", "epoch try: torch.save(params, filename) # bad hack for not saving dictionary twice args.word_dict", "# -------------------------------------------------------------------------- # train parameters = ret.get_trainable_params() optimizer = None if parameters is", "0 top3 = 0 top5 = 0 for qid in corpus.questions: para_scores =", "model from scratch') word_dict = feature_dict = None # create or get vocab", "'nag': optimizer = NAG(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) else: raise RuntimeError('Unsupported optimizer: %s' %", "= [] qid2idx = {} cum_num_lens = [] all_correct_ans = {} cum_num_len =", "args.src, \"data\", args.domain, args.dev_file_name), \"rb\") all_dev_exs = pickle.load(fin) fin.close() if args.test == 1:", "y_num_occurrences = Variable(ex[-2]) labels = (y_num_occurrences > 
0).float() labels = labels.data.numpy() scores =", "in enumerate(ranked_para_ids): if labels[counter + no_paras ] ==1: if i <= 4: top_5", "in tqdm(enumerate(data_loader)): ret_input = [*ex] y_num_occurrences = ex[3] labels = (y_num_occurrences > 0)", "ques = model.return_topk(5,*ret_input) except RuntimeError: import pdb pdb.set_trace() num_paras = ex[1] qids =", "assert cum_num_lens[-1] == documents.shape[0] assert questions.shape[0] == documents.shape[0] assert len(cum_num_lens) == len(qid2idx) assert", "0} def make_data_loader(args, corpus, train_time=False): dataset = data.MultiCorpusDataset( args, corpus, args.word_dict, args.feature_dict, single_answer=False,", "\"/iesl/canvas/sdhuliawala/vectors_web/dev/\" if not os.path.exists(OUT_DIR): os.mkdir(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.mkdir(OUT_DIR) #Test cases assert cum_num_lens[-1]", "logger.info(\"Printing vectors at {}\".format(OUT_DIR)) if not os.path.exists(OUT_DIR): os.makedirs(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.makedirs(OUT_DIR) json.dump(qid2idx,", "= torch.load(args.pretrained) word_dict = model['word_dict'] feature_dict = model['feature_dict'] args.vocab_size = len(word_dict) args.embedding_dim_orig =", "the annotated dev set.') logger.info('top-1: {:2.4f}, top-3: {:2.4f}, top-5: {:2.4f}'.format( (top_1 * 1.0", "fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.train_file_name), \"rb\") all_train_exs = pickle.load(fin) fin.close() fin", "scores = scores.reshape((-1)) if save_scores: for i, pid in enumerate(ex[-1]): para_vectors[pid] = doc[i]", "ret_model, all_dev_exs, dev_loader, verified_dev_loader=None) if stats['best_acc'] < top1: stats['best_acc'] = top1 logger.info('Best accuracy", "F.sigmoid(scores) y_num_occurrences = Variable(ex[-2]) labels = (y_num_occurrences > 0).float() labels = labels.data.numpy() scores", "parameters is not None and len(parameters) > 0: if args.optimizer == 'sgd': optimizer", "'%m/%d/%Y 
%I:%M:%S %p') console = logging.StreamHandler() console.setFormatter(fmt) logger.addHandler(console) if args.log_file: if args.checkpoint: logfile", "enumerate(topk_paras): total_num_questions += 1 for i, no_paras in enumerate(ranked_para_ids): if labels[counter + no_paras", "= doc[i] for i, qid in enumerate([corpus.paragraphs[pid].qid for pid in ex[-1]]): if qid", "0 args.train_time = False ret_model.model.eval() para_vectors = {} question_vectors = {} for idx,", "feature_dict = init_from_scratch(args, all_train_exs) else: ret_model, optimizer, word_dict, feature_dict = init_from_checkpoint(args) # make", "reverse=True) if sorted_para_scores[0][1] > 0: top1 += 1 if sum([ans[1] for ans in", "## saving code if train: OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, \"train/\") else: if", "for q_counter, ranked_para_ids in enumerate(topk_paras): total_num_questions += 1 for i, no_paras in enumerate(ranked_para_ids):", "if args.test: args.is_test = 1 logger.info(\"Saving test paragraph vectors\") save_vectors(args, ret_model, all_test_exs, test_loader,", "logger.info('Model loaded...') return ret, optimizer, word_dict, feature_dict def init_from_scratch(args, train_exs): logger.info('Initializing model from", "## check pointing## save(args, ret_model.model, optimizer, args.model_file+\".ckpt\", epoch=stats['epoch']) logger.info(\"Evaluating on the full dev", "logger.info('top1 = {}, top3 = {}, top5 = {} '.format(top1, top3 ,top5 ))", "total_exs = 0 args.train_time = False ret_model.model.eval() para_vectors = {} question_vectors = {}", "if sum([ans[1] for ans in sorted_para_scores[:3]]) > 0: top3 += 1 if sum([ans[1]", "RandomSampler(dataset) loader = torch.utils.data.DataLoader( dataset, batch_size=args.batch_size, sampler=sampler, num_workers=args.data_workers, collate_fn=vector.batchify(args, args.para_mode, train_time=train_time), pin_memory=True )", "data, vector from model.retriever import LSTMRetriever from multi_corpus import MultiCorpus from 
torch.utils.data.sampler import", "= 0 top5 = 0 for qid in corpus.questions: para_scores = [(corpus.paragraphs[pid].tfidf_score, corpus.paragraphs[pid].ans_occurance)", "args = config.get_args() # Set cuda args.cuda = not args.no_cuda and torch.cuda.is_available() if", "enumerate(ex[-1]): corpus.paragraphs[pid].model_score = scores[i] scores = scores > 0.5 a = scores ==", "if args.cuda: torch.cuda.manual_seed(args.random_seed) # Set logging logger.setLevel(logging.INFO) fmt = logging.Formatter('%(asctime)s: %(message)s', '%m/%d/%Y %I:%M:%S", "1: args.train_file_name = args.train_file_name + \"_small\" args.dev_file_name = args.dev_file_name + \"_small\" if args.test", "optimizer, word_dict, feature_dict def init_from_scratch(args, train_exs): logger.info('Initializing model from scratch') word_dict = feature_dict", "save(args, ret_model.model, optimizer, args.model_file, epoch=stats['epoch']) if __name__ == '__main__': # MODEL logger.info('-' *", "<filename>paragraph_encoder/train_para_encoder.py import torch import numpy as np import json import os import pickle", "ignore_errors=True) os.mkdir(OUT_DIR) #Test cases assert cum_num_lens[-1] == documents.shape[0] assert questions.shape[0] == documents.shape[0] assert", "1.0 / total_num_questions))) ## saving code if args.save_para_clf_output: if eval_on_train_set: OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/train/\"", "0: top1 += 1 if sum([ans[1] for ans in sorted_para_scores[:3]]) > 0: top3", "{} '.format(top1 / len(corpus.questions), top3 / len(corpus.questions), top5 / len(corpus.questions))) def run_predictions(args, data_loader,", "torch.save(params, filename) # bad hack for not saving dictionary twice args.word_dict = params['word_dict']", "def run_predictions(args, data_loader, model, eval_on_train_set=False): args.train_time = False top_1 = 0 top_3 =", "logging.FileHandler(args.log_file, 'w') logfile.setFormatter(fmt) logger.addHandler(logfile) logger.info('[ COMMAND: %s ]' % ' '.join(sys.argv)) # Run!", 
"{}, top5 = {} '.format(top1, top3 ,top5 )) return top1 def get_topk_tfidf(corpus): top1", "1.0 / total_num_questions), (top_3 * 1.0 / total_num_questions), (top_5 * 1.0 / total_num_questions)", "for para_i, pid in enumerate(corpus.questions[qid].pids): if corpus.paragraphs[pid].ans_occurance > 0: labels.append(para_i) all_para_vectors.append(para_vectors[pid]) all_correct_ans[qid] =", "{ 'state_dict': { 'para_clf': model.state_dict(), 'optimizer': optimizer.state_dict() }, 'word_dict': args.word_dict, 'feature_dict': args.feature_dict }", "momentum=args.momentum, weight_decay=args.weight_decay) else: raise RuntimeError('Unsupported optimizer: %s' % args.optimizer) optimizer.load_state_dict(model['state_dict']['optimizer']) logger.info('Model loaded...') return", "* 1.0 / total_num_questions), (top_5 * 1.0 / total_num_questions) def save(args, model, optimizer,", "j in range(num_paras[i]): if labels[st+j] == 1: all_correct_answers[map_counter].append(j) ### Test case: assert len(all_correct_answers[map_counter])", "logger.info(\"Loading pickle files\") fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.train_file_name), \"rb\") all_train_exs =", "y_num_occurrences = Variable(ex[-2]) labels = (y_num_occurrences > 0).float() labels = labels.cuda() # BCE", "train=False, test=False): total_exs = 0 args.train_time = False ret_model.model.eval() para_vectors = {} question_vectors", "__name__ == '__main__': # MODEL logger.info('-' * 100) # Parse cmdline args and", "train_exs) if word_dict is not None: args.vocab_size = len(word_dict) args.embedding_dim_orig = args.embedding_dim args.word_dict", "in question_vectors: question_vectors[qid] = ques[i] for i, pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score = scores[i]", "= scores.reshape((-1)) if save_scores: for i, pid in enumerate(ex[-1]): para_vectors[pid] = doc[i] for", "top1 = 0 top3 = 0 top5 = 0 for qid in corpus.questions:", "args.embedding_dim args.word_dict = word_dict 
args.feature_dict = feature_dict ret = LSTMRetriever(args, word_dict, feature_dict) #", "None: continue inputs = [e if e is None or type(e) != type(ex[0])", "None or type(e) != type(ex[0]) else Variable(e.cuda(async=True)) for e in ex[:]] ret_input =", "for i, qid in enumerate([corpus.paragraphs[pid].qid for pid in ex[-1]]): if qid not in", "ret = LSTMRetriever(args, word_dict, feature_dict) # load saved param values ret.model.load_state_dict(model['state_dict']['para_clf']) optimizer =", "= 0 cum_num_lens = [] qid2idx = {} sum_num_paras = 0 all_correct_answers =", "get_topk(corpus) return top1 def print_vectors(args, para_vectors, question_vectors, corpus, train=False, test=False): all_question_vectors = []", "= {}, top5 = {} '.format(top1, top3 ,top5 )) return top1 def get_topk_tfidf(corpus):", "import tqdm from torch.autograd import Variable import torch.optim as optim import torch.nn.functional as", "\"data\", args.domain, args.dev_file_name), \"rb\") all_dev_exs = pickle.load(fin) fin.close() if args.test == 1: fin", "for i, no_paras in enumerate(ranked_para_ids): if labels[counter + no_paras ] ==1: if i", "= np.array(cum_num_lens) np.save(OUT_DIR + \"document\", all_para_vectors) np.save(OUT_DIR + \"question\", all_question_vectors) np.save(OUT_DIR + \"all_cumlen\",", "para_vectors, question_vectors, corpus, train, test) def get_topk(corpus): top1 = 0 top3 = 0", "top_3 += 1 if i <= 0: top_1 += 1 break counter +=", "= {:2.4f}'.format( stats['epoch'], idx, len(train_loader), para_loss.avg)) para_loss.reset() def eval_binary_classification(args, ret_model, corpus, dev_loader, verified_dev_loader=None,", "all_question_vectors) np.save(OUT_DIR + \"all_cumlen\", cum_num_lens) def save_vectors(args, ret_model, corpus, data_loader, verified_dev_loader=None, save_scores =", "\"data\", args.domain, args.test_file_name), \"rb\") all_test_exs = pickle.load(fin) fin.close() logger.info(\"Loading done!\") logger.info(\"Num train examples", "LSTMRetriever from multi_corpus 
import MultiCorpus from torch.utils.data.sampler import SequentialSampler, RandomSampler import math logger", "args.train_file_name = args.train_file_name + \"_small\" args.dev_file_name = args.dev_file_name + \"_small\" if args.test ==", "ret_model.score_paras(*ret_input) scores = F.sigmoid(scores) y_num_occurrences = Variable(ex[-2]) labels = (y_num_occurrences > 0).float() labels", "\"_small\" args.dev_file_name = args.dev_file_name + \"_small\" if args.test == 1: args.test_file_name = args.test_file_name", "corpus, train_time=False): dataset = data.MultiCorpusDataset( args, corpus, args.word_dict, args.feature_dict, single_answer=False, para_mode=args.para_mode, train_time=train_time )", "for j in range(num_paras[i]): if labels[st+j] == 1: all_correct_answers[map_counter].append(j) ### Test case: assert", "feature_dict def init_from_scratch(args, train_exs): logger.info('Initializing model from scratch') word_dict = feature_dict = None", "total_num_questions = 0 map_counter = 0 cum_num_lens = [] qid2idx = {} sum_num_paras", "\"question\", questions) np.save(OUT_DIR + \"all_cumlen\", all_cumlen) return (top_1 * 1.0 / total_num_questions), (top_3", "np.save(OUT_DIR + \"all_cumlen\", cum_num_lens) def save_vectors(args, ret_model, corpus, data_loader, verified_dev_loader=None, save_scores = True,", "= {} question_vectors = {} for idx, ex in enumerate(tqdm(data_loader)): if ex is", "# ------------------------------------------------------------------------------ # Main. 
# ------------------------------------------------------------------------------ def main(args): # PRINT CONFIG logger.info('-' *", "= {} | iter={}/{} | para loss = {:2.4f}'.format( stats['epoch'], idx, len(train_loader), para_loss.avg))", "os import pickle import sys import logging import shutil from tqdm import tqdm", "(top_5 * 1.0 / total_num_questions))) ## saving code if args.save_para_clf_output: if eval_on_train_set: OUT_DIR", "== None: args.word_dict = utils.build_word_dict(args, (all_train_exs, all_dev_exs)) word_dict = args.word_dict train_loader = make_data_loader(args,", "for i, pid in enumerate(ex[-1]): para_vectors[pid] = doc[i] for i, qid in enumerate([corpus.paragraphs[pid].qid", "args.domain, args.dev_file_name), \"rb\") all_dev_exs = pickle.load(fin) fin.close() if args.test == 1: fin =", "= (y_num_occurrences > 0).float() labels = labels.data.numpy() scores = scores.cpu().data.numpy() scores = scores.reshape((-1))", "= utils.Timer() stats = {'timer': global_timer, 'epoch': 0, 'best_valid': 0, 'best_verified_valid': 0, 'best_acc':", "if save_scores: for i, pid in enumerate(ex[-1]): para_vectors[pid] = doc[i] for i, qid", "if args.save_para_clf_output: if eval_on_train_set: OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/train/\" else: OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/dev/\" if not", "scores.reshape((-1)) if save_scores: for i, pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score = scores[i] scores =", "None # create or get vocab word_dict = utils.build_word_dict(args, train_exs) if word_dict is", "paragraph vectors\") save_vectors(args, ret_model, all_train_exs, train_loader, verified_dev_loader=None, train=True) if args.test: args.is_test = 1", "ret_input = [*ex] y_num_occurrences = ex[3] labels = (y_num_occurrences > 0) try: topk_paras,", "/ total_num_questions), (top_3 * 1.0 / total_num_questions), (top_5 * 1.0 / total_num_questions) def", "all_dev_exs)) word_dict = args.word_dict train_loader = make_data_loader(args, 
all_train_exs, train_time=False) if args.eval_only else make_data_loader(args,", "= ret.get_trainable_params() if args.optimizer == 'sgd': optimizer = optim.SGD(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) elif", "= get_topk(corpus) return top1 def print_vectors(args, para_vectors, question_vectors, corpus, train=False, test=False): all_question_vectors =", "0 top5 = 0 for qid in corpus.questions: para_scores = [(corpus.paragraphs[pid].model_score,corpus.paragraphs[pid].ans_occurance ) for", "= {'timer': global_timer, 'epoch': 0, 'best_valid': 0, 'best_verified_valid': 0, 'best_acc': 0, 'best_verified_acc': 0}", "< top1: stats['best_acc'] = top1 logger.info('Best accuracy {}'.format(stats['best_acc'])) logger.info('Saving model at {}'.format(args.model_file)) logger.info(\"Logs", "0 map_counter = 0 cum_num_lens = [] qid2idx = {} sum_num_paras = 0", "loss = {:2.4f}'.format( stats['epoch'], idx, len(train_loader), para_loss.avg)) para_loss.reset() def eval_binary_classification(args, ret_model, corpus, dev_loader,", "json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w')) json.dump(all_correct_ans, open(OUT_DIR + 'correct_paras.json', 'w')) all_cumlen = np.array(cum_num_lens)", "top5 / len(corpus.questions))) def run_predictions(args, data_loader, model, eval_on_train_set=False): args.train_time = False top_1 =", "qid in enumerate(qids): qid2idx[qid] = map_counter sum_num_paras += num_paras[i] cum_num_lens.append(sum_num_paras) all_correct_answers[map_counter] = []", "{}\".format(len(all_test_exs.paragraphs))) if args.pretrained is None: ret_model, optimizer, word_dict, feature_dict = init_from_scratch(args, all_train_exs) else:", "i, pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score = scores[i] scores = scores > 0.5 a", "ex[-1] if args.save_para_clf_output: docs = docs.cpu().data.numpy() ques = ques.cpu().data.numpy() if ex_counter == 0:", "logging.FileHandler(args.log_file, 'a') else: logfile = 
logging.FileHandler(args.log_file, 'w') logfile.setFormatter(fmt) logger.addHandler(logfile) logger.info('[ COMMAND: %s ]'", "init_from_checkpoint(args) # make data loader logger.info(\"Making data loaders...\") if word_dict == None: args.word_dict", "= None parameters = ret.get_trainable_params() if args.optimizer == 'sgd': optimizer = optim.SGD(parameters, args.learning_rate,", "'adamax': optimizer = optim.Adamax(parameters, weight_decay=args.weight_decay) elif args.optimizer == 'nag': optimizer = NAG(parameters, args.learning_rate,", "else: pass return ret, optimizer, word_dict, feature_dict def train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None):", "word_dict, feature_dict) # -------------------------------------------------------------------------- # TRAIN/VALID LOOP # -------------------------------------------------------------------------- # train parameters =", "+ num_paras[i]]) map_counter += 1 counter = 0 for q_counter, ranked_para_ids in enumerate(topk_paras):", "labels = [] all_question_vectors.append(question_vectors[qid]) qid2idx[qid] = question_i cum_num_len += len(corpus.questions[qid].pids) cum_num_lens.append(cum_num_len) for para_i,", "saved param values ret.model.load_state_dict(model['state_dict']['para_clf']) optimizer = None parameters = ret.get_trainable_params() if args.optimizer ==", "0 and idx > 0: logger.info('Epoch = {} | iter={}/{} | para loss", "= ret_model.score_paras(*ret_input) y_num_occurrences = Variable(ex[-2]) labels = (y_num_occurrences > 0).float() labels = labels.cuda()", "x[0], reverse=True) if sorted_para_scores[0][1] > 0: top1 += 1 if sum([ans[1] for ans", "0: documents = docs questions = ques else: documents = np.concatenate([documents, docs]) questions", "logger.info(\"Saving train paragraph vectors\") save_vectors(args, ret_model, all_train_exs, train_loader, verified_dev_loader=None, train=True) if args.test: args.is_test", "cum_num_lens = [] all_correct_ans = {} 
cum_num_len = 0 for question_i, qid in", "sum([ans[1] for ans in sorted_para_scores[:5]]) > 0: top5 += 1 logger.info( 'top1 =", "> 0: labels.append(para_i) all_para_vectors.append(para_vectors[pid]) all_correct_ans[qid] = labels all_para_vectors = np.stack(all_para_vectors) all_question_vectors = np.stack(all_question_vectors)", "if args.optimizer == 'sgd': optimizer = optim.SGD(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) elif args.optimizer ==", "top1 += 1 if sum([ans[1] for ans in sorted_para_scores[:3]]) > 0: top3 +=", "open(OUT_DIR + 'map.json', 'w')) json.dump(all_correct_ans, open(OUT_DIR + 'correct_paras.json', 'w')) all_cumlen = np.array(cum_num_lens) np.save(OUT_DIR", "corpus.questions[qid].pids] sorted_para_scores = sorted(para_scores, key=lambda x: x[0], reverse=True) if sorted_para_scores[0][1] > 0: top1", "of para classifier when evaluated on the annotated dev set.') logger.info('top-1: {:2.4f}, top-3:", "labels = (y_num_occurrences > 0).float() labels = labels.data.numpy() scores = scores.cpu().data.numpy() scores =", "else Variable(e.cuda(async=True)) for e in ex[:]] ret_input = [*inputs[:4]] total_exs += ex[0].size(0) scores,", "> 0).float() labels = labels.cuda() # BCE logits loss batch_para_loss = F.binary_cross_entropy_with_logits(scores.squeeze(1), labels)", "= [*ex] y_num_occurrences = ex[3] labels = (y_num_occurrences > 0) try: topk_paras, docs,", "from saved checkpoint {}'.format(args.pretrained)) model = torch.load(args.pretrained) word_dict = model['word_dict'] feature_dict = model['feature_dict']", "for e in ex[:]] ret_input = [*inputs[:4]] total_exs += ex[0].size(0) scores, doc, ques", "+ \".pkl\" logger.info(\"Loading pickle files\") fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.train_file_name), \"rb\")", "train_time=True) dev_loader = make_data_loader(args, all_dev_exs) if args.test: test_loader = make_data_loader(args, all_test_exs) if args.eval_only:", "top3 = 
0 top5 = 0 for qid in corpus.questions: para_scores = [(corpus.paragraphs[pid].tfidf_score,", "= utils.AverageMeter() ret_model.model.train() for idx, ex in enumerate(train_loader): if ex is None: continue", "None params['config'] = vars(args) if epoch: params['epoch'] = epoch try: torch.save(params, filename) #", "for e in ex[:]] ret_input = [*inputs[:4]] scores, _, _ = ret_model.score_paras(*ret_input) y_num_occurrences", "= docs questions = ques else: documents = np.concatenate([documents, docs]) questions = np.concatenate([questions,", "docs questions = ques else: documents = np.concatenate([documents, docs]) questions = np.concatenate([questions, ques])", "def save(args, model, optimizer, filename, epoch=None): params = { 'state_dict': { 'para_clf': model.state_dict(),", "= None params['config'] = vars(args) if epoch: params['epoch'] = epoch try: torch.save(params, filename)", "else: OUT_DIR = \"/iesl/canvas/sdhuliawala/vectors_web/dev/\" if not os.path.exists(OUT_DIR): os.mkdir(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.mkdir(OUT_DIR) #Test", "train_exs): logger.info('Initializing model from scratch') word_dict = feature_dict = None # create or", "corpus.questions: para_scores = [(corpus.paragraphs[pid].model_score,corpus.paragraphs[pid].ans_occurance ) for pid in corpus.questions[qid].pids] sorted_para_scores = sorted(para_scores, key=lambda", "%p') console = logging.StreamHandler() console.setFormatter(fmt) logger.addHandler(console) if args.log_file: if args.checkpoint: logfile = logging.FileHandler(args.log_file,", "logger.info('Initializing model from scratch') word_dict = feature_dict = None # create or get", "collate_fn=vector.batchify(args, args.para_mode, train_time=train_time), pin_memory=True ) return loader def init_from_checkpoint(args): logger.info('Loading model from saved", "range(args.num_epochs): stats['epoch'] = epoch train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None) 
logger.info('checkpointing model at {}'.format(args.model_file))", "ex is None: raise BrokenPipeError inputs = [e if e is None or", "open(OUT_DIR + 'correct_paras.json', 'w')) all_cumlen = np.array(cum_num_lens) np.save(OUT_DIR + \"document\", documents) np.save(OUT_DIR +", "import logging import shutil from tqdm import tqdm from torch.autograd import Variable import", "save(args, ret_model.model, optimizer, args.model_file+\".ckpt\", epoch=stats['epoch']) logger.info(\"Evaluating on the full dev set....\") top1 =", "pid in corpus.questions[qid].pids] sorted_para_scores = sorted(para_scores, key=lambda x: x[0]) # import pdb #", "1 logger.info(\"Saving test paragraph vectors\") save_vectors(args, ret_model, all_test_exs, test_loader, verified_dev_loader=None) else: get_topk_tfidf(all_dev_exs) for", "= question_i cum_num_len += len(corpus.questions[qid].pids) cum_num_lens.append(cum_num_len) for para_i, pid in enumerate(corpus.questions[qid].pids): if corpus.paragraphs[pid].ans_occurance", "optim import torch.nn.functional as F from torch.utils.data.sampler import RandomSampler import config from model", "word_dict is not None: args.vocab_size = len(word_dict) args.embedding_dim_orig = args.embedding_dim args.word_dict = word_dict", "{}'.format(args.pretrained)) model = torch.load(args.pretrained) word_dict = model['word_dict'] feature_dict = model['feature_dict'] args.vocab_size = len(word_dict)", "= np.stack(all_para_vectors) all_question_vectors = np.stack(all_question_vectors) assert all_para_vectors.shape[0] == cum_num_lens[-1] assert all_question_vectors.shape[0] == len(cum_num_lens)", "in enumerate(train_loader): if ex is None: continue inputs = [e if e is", "# make data loader logger.info(\"Making data loaders...\") if word_dict == None: args.word_dict =", "MultiCorpus from torch.utils.data.sampler import SequentialSampler, RandomSampler import math logger = logging.getLogger() global_timer =", "weight_decay=args.weight_decay) elif args.optimizer == 'nag': 
optimizer = NAG(parameters, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) else: raise", "import config from model import utils, data, vector from model.retriever import LSTMRetriever from", "= F.sigmoid(scores) y_num_occurrences = Variable(ex[-2]) labels = (y_num_occurrences > 0).float() labels = labels.data.numpy()", "(top_5 * 1.0 / total_num_questions) def save(args, model, optimizer, filename, epoch=None): params =", "questions = ques else: documents = np.concatenate([documents, docs]) questions = np.concatenate([questions, ques]) ###", "in enumerate(ex[-1]): para_vectors[pid] = doc[i] for i, qid in enumerate([corpus.paragraphs[pid].qid for pid in", "loaders...\") if word_dict == None: args.word_dict = utils.build_word_dict(args, (all_train_exs, all_dev_exs)) word_dict = args.word_dict", "= map_counter sum_num_paras += num_paras[i] cum_num_lens.append(sum_num_paras) all_correct_answers[map_counter] = [] st = sum(num_paras[:i]) for", "model import utils, data, vector from model.retriever import LSTMRetriever from multi_corpus import MultiCorpus", "= sum(num_paras[:i]) for j in range(num_paras[i]): if labels[st+j] == 1: all_correct_answers[map_counter].append(j) ### Test", "test=False): total_exs = 0 args.train_time = False ret_model.model.eval() para_vectors = {} question_vectors =", "{}\".format(len(all_train_exs.paragraphs))) logger.info(\"Num dev examples {}\".format(len(all_dev_exs.paragraphs))) if args.test == 1: logger.info(\"Num test examples {}\".format(len(all_test_exs.paragraphs)))", "if math.isnan(para_loss.avg): import pdb pdb.set_trace() if idx % 25 == 0 and idx", "model at {}'.format(args.model_file)) logger.info(\"Logs saved at {}\".format(args.log_file)) save(args, ret_model.model, optimizer, args.model_file, epoch=stats['epoch']) if", "None and len(parameters) > 0: if args.optimizer == 'sgd': optimizer = optim.SGD(parameters, args.learning_rate,", "if args.pretrained is None: ret_model, optimizer, word_dict, 
feature_dict = init_from_scratch(args, all_train_exs) else: ret_model,", "pdb.set_trace() if sorted_para_scores[0][1] > 0: top1 += 1 if sum([ans[1] for ans in", "word_dict = feature_dict = None # create or get vocab word_dict = utils.build_word_dict(args,", "top5 = 0 for qid in corpus.questions: para_scores = [(corpus.paragraphs[pid].tfidf_score, corpus.paragraphs[pid].ans_occurance) for pid", "> 0).float() labels = labels.data.numpy() scores = scores.cpu().data.numpy() scores = scores.reshape((-1)) if save_scores:", "'best_valid': 0, 'best_verified_valid': 0, 'best_acc': 0, 'best_verified_acc': 0} def make_data_loader(args, corpus, train_time=False): dataset", "+= 1 logger.info( 'top1 = {}, top3 = {}, top5 = {} '.format(top1", "x[0]) # import pdb # pdb.set_trace() if sorted_para_scores[0][1] > 0: top1 += 1", "optimizer, word_dict, feature_dict = init_from_scratch(args, all_train_exs) else: ret_model, optimizer, word_dict, feature_dict = init_from_checkpoint(args)", "0: top5 += 1 top1 = top1/len(corpus.questions) top3 = top3/len(corpus.questions) top5 = top5/len(corpus.questions)", "examples {}\".format(len(all_train_exs.paragraphs))) logger.info(\"Num dev examples {}\".format(len(all_dev_exs.paragraphs))) if args.test == 1: logger.info(\"Num test examples", "0, 'best_valid': 0, 'best_verified_valid': 0, 'best_acc': 0, 'best_verified_acc': 0} def make_data_loader(args, corpus, train_time=False):", "<= 2: top_3 += 1 if i <= 0: top_1 += 1 break", "| para loss = {:2.4f}'.format( stats['epoch'], idx, len(train_loader), para_loss.avg)) para_loss.reset() def eval_binary_classification(args, ret_model,", "Variable(e.cuda(async=True)) for e in ex[:]] ret_input = [*inputs[:4]] total_exs += ex[0].size(0) scores, _,", "files\") fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.train_file_name), \"rb\") all_train_exs = pickle.load(fin) fin.close()", "not saving dictionary twice args.word_dict = params['word_dict'] args.feature_dict = 
params['feature_dict'] except BaseException: logger.warn('[", "args.src, \"data\", args.domain, args.train_file_name), \"rb\") all_train_exs = pickle.load(fin) fin.close() fin = open(os.path.join(args.data_dir, args.src,", "question_vectors[qid] = ques[i] for i, pid in enumerate(ex[-1]): corpus.paragraphs[pid].model_score = scores[i] get_topk(corpus) print_vectors(args,", "None if parameters is not None and len(parameters) > 0: if args.optimizer ==", "# bad hack for not saving dictionary twice args.word_dict = params['word_dict'] args.feature_dict =", "+= 1 for i, no_paras in enumerate(ranked_para_ids): if labels[counter + no_paras ] ==1:", "{:2.4f}'.format( stats['epoch'], idx, len(train_loader), para_loss.avg)) para_loss.reset() def eval_binary_classification(args, ret_model, corpus, dev_loader, verified_dev_loader=None, save_scores", "= ex[3] labels = (y_num_occurrences > 0) try: topk_paras, docs, ques = model.return_topk(5,*ret_input)", "for not saving dictionary twice args.word_dict = params['word_dict'] args.feature_dict = params['feature_dict'] except BaseException:", "model, optimizer, filename, epoch=None): params = { 'state_dict': { 'para_clf': model.state_dict(), 'optimizer': optimizer.state_dict()", "'feature_dict': args.feature_dict } args.word_dict = None args.feature_dict = None params['config'] = vars(args) if", "ex[:]] ret_input = [*inputs[:4]] total_exs += ex[0].size(0) scores, doc, ques = ret_model.score_paras(*ret_input) scores", "optimizer.load_state_dict(model['state_dict']['optimizer']) logger.info('Model loaded...') return ret, optimizer, word_dict, feature_dict def init_from_scratch(args, train_exs): logger.info('Initializing model", "fmt = logging.Formatter('%(asctime)s: %(message)s', '%m/%d/%Y %I:%M:%S %p') console = logging.StreamHandler() console.setFormatter(fmt) logger.addHandler(console) if", "from model.retriever import LSTMRetriever from multi_corpus import MultiCorpus from torch.utils.data.sampler import SequentialSampler, 
RandomSampler", "os.path.exists(OUT_DIR): os.mkdir(OUT_DIR) else: shutil.rmtree(OUT_DIR, ignore_errors=True) os.mkdir(OUT_DIR) #Test cases assert cum_num_lens[-1] == documents.shape[0] assert", "if args.checkpoint: logfile = logging.FileHandler(args.log_file, 'a') else: logfile = logging.FileHandler(args.log_file, 'w') logfile.setFormatter(fmt) logger.addHandler(logfile)", "paragraph vectors\") save_vectors(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None) logger.info(\"Saving train paragraph vectors\") save_vectors(args, ret_model,", "num_workers=args.data_workers, collate_fn=vector.batchify(args, args.para_mode, train_time=train_time), pin_memory=True ) return loader def init_from_checkpoint(args): logger.info('Loading model from", "= [] st = sum(num_paras[:i]) for j in range(num_paras[i]): if labels[st+j] == 1:", "raise RuntimeError('Unsupported optimizer: %s' % args.optimizer) else: pass return ret, optimizer, word_dict, feature_dict", "ret_model.model.eval() para_vectors = {} question_vectors = {} for idx, ex in enumerate(tqdm(data_loader)): if", "stats['best_acc'] < top1: stats['best_acc'] = top1 logger.info('Best accuracy {}'.format(stats['best_acc'])) logger.info('Saving model at {}'.format(args.model_file))", "as np import json import os import pickle import sys import logging import", "save_vectors(args, ret_model, all_test_exs, test_loader, verified_dev_loader=None) else: get_topk_tfidf(all_dev_exs) for epoch in range(args.num_epochs): stats['epoch'] =", "for ans in sorted_para_scores[:5]]) > 0: top5 += 1 logger.info( 'top1 = {},", "i <= 0: top_1 += 1 break counter += num_paras[q_counter] logger.info('Accuracy of para", "all_para_vectors) np.save(OUT_DIR + \"question\", all_question_vectors) np.save(OUT_DIR + \"all_cumlen\", cum_num_lens) def save_vectors(args, ret_model, corpus,", "[] all_question_vectors.append(question_vectors[qid]) qid2idx[qid] = question_i cum_num_len += len(corpus.questions[qid].pids) 
cum_num_lens.append(cum_num_len) for para_i, pid in", "= feature_dict ret = LSTMRetriever(args, word_dict, feature_dict) # load saved param values ret.model.load_state_dict(model['state_dict']['para_clf'])", "ranked_para_ids in enumerate(topk_paras): total_num_questions += 1 for i, no_paras in enumerate(ranked_para_ids): if labels[counter", "= pickle.load(fin) fin.close() fin = open(os.path.join(args.data_dir, args.src, \"data\", args.domain, args.dev_file_name), \"rb\") all_dev_exs =", "= (y_num_occurrences > 0).float() labels = labels.cuda() # BCE logits loss batch_para_loss =", "os.path.join(args.save_dir, args.src, args.domain, \"test/\") logger.info(\"Printing vectors at {}\".format(OUT_DIR)) if not os.path.exists(OUT_DIR): os.makedirs(OUT_DIR) else:", "'w') logfile.setFormatter(fmt) logger.addHandler(logfile) logger.info('[ COMMAND: %s ]' % ' '.join(sys.argv)) # Run! main(args)", "total_num_questions) def save(args, model, optimizer, filename, epoch=None): params = { 'state_dict': { 'para_clf':", "np.stack(all_question_vectors) assert all_para_vectors.shape[0] == cum_num_lens[-1] assert all_question_vectors.shape[0] == len(cum_num_lens) assert all_question_vectors.shape[0] == len(qid2idx)", "data loaders...\") if word_dict == None: args.word_dict = utils.build_word_dict(args, (all_train_exs, all_dev_exs)) word_dict =", "args.train_time = False ret_model.model.eval() para_vectors = {} question_vectors = {} for idx, ex", "logger.info('Eval accuracy = {} '.format(accuracy/total_exs)) top1 = get_topk(corpus) return top1 def print_vectors(args, para_vectors,", "= os.path.join(args.save_dir, args.src, args.domain, \"train/\") else: if args.is_test == 0: OUT_DIR = os.path.join(args.save_dir,", "scores.cpu().data.numpy() scores = scores.reshape((-1)) if save_scores: for i, pid in enumerate(ex[-1]): para_vectors[pid] =", "if args.test == 1: args.test_file_name = args.test_file_name + \".pkl\" logger.info(\"Loading pickle files\") fin", "sorted(para_scores, key=lambda 
x: x[0]) # import pdb # pdb.set_trace() if sorted_para_scores[0][1] > 0:", "corpus, data_loader, verified_dev_loader=None, save_scores = True, train=False, test=False): total_exs = 0 args.train_time =", "ret_model, optimizer, word_dict, feature_dict = init_from_checkpoint(args) # make data loader logger.info(\"Making data loaders...\")", "if args.test == 1: args.test_file_name = args.test_file_name + \"_small\" args.train_file_name = args.train_file_name +", "> 0: top3 += 1 if sum([ans[1] for ans in sorted_para_scores[:5]]) > 0:", "not None: args.vocab_size = len(word_dict) args.embedding_dim_orig = args.embedding_dim args.word_dict = word_dict args.feature_dict =", "{ 'para_clf': model.state_dict(), 'optimizer': optimizer.state_dict() }, 'word_dict': args.word_dict, 'feature_dict': args.feature_dict } args.word_dict =", "logger.info(\"Num test examples {}\".format(len(all_test_exs.paragraphs))) if args.pretrained is None: ret_model, optimizer, word_dict, feature_dict =", "math.isnan(para_loss.avg): import pdb pdb.set_trace() if idx % 25 == 0 and idx >" ]
[ "import sys from pysnptools.snpreader.bed import Bed import subprocess cleanDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/cleanData/' rawDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/rawData' pheno_fn_bac =os.path.join(cleanDataPath,'noMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') #pheno_fn_bac", "activity - freq'] if 'bloodType' in args: mb_columns_extra += ['Blood A','Blood B','Blood RH-']", "'Green onions', 'Mushrooms', 'Lemon juice', 'Canned Tuna Fish', 'Vegetable Salad', 'Fried eggplant', 'Salmon',", "#print \"805175 has no FFQ!!!!! that is why we remove him\" features_to_drop=[] if", "which individuals to remove on the fly import ForPaper.VertexCut as vc df_household =", "'Melon Freq', 'Kiwi or Strawberries Freq', 'Mango Freq', 'Peach, Nectarine, Plum Freq', 'Pear", "and dic are mutually exclusive' if ('taxa' in list(kwargs.keys())): assert len(set(['all_bac','s', 'g','f','o','c','p','otu']).intersection(set(args)))==0, \\", "+= ['Ordinary Bread or Challah Freq', 'Light Bread Freq', 'Wholemeal or Rye Bread", "in args): features_to_drop += ['IsGenotek'] if ('covars' not in args) and ('covars_noPCs' not", "'Potatoes', 'Halva', 'Yellow pepper', 'Mango', 'Lasagna', 'Popcorn', 'Hummus Salad', 'Tilapia', 'Pizza', 'Fried cauliflower',", "in args or ('PCs') in args: mb_columns += [c for c in pheno.columns", "Skin Freq', 'Sausages Freq', 'Sausages such as Salami Freq', 'Pastrami or Smoked Turkey", "c[:2] not in ['s_','g_','f_','o_','c_','p_']] if 'dic' in args: presence=((pheno[mb_columns]>threshold +1e-5)&(pheno[mb_columns]!=0)).astype(int).sum() else: presence=(pheno[mb_columns]>threshold +1e-5).astype(int).sum()", "Cakes and Cookies as Rogallach, Croissant or Donut Freq', 'Cake, Torte Cakes, Chocolate", "Free Gum', 'Hamburger', 'Dark Beer', 'Cooked beets', 'Almonds', 'Falafel', 'Noodles', 'Jachnun', 'Turkey', 'Sushi',", "args: mb_columns_extra += ['Currently smokes','Ever smoked'] if 'sweets' in args: 
mb_columns_extra += ['Milk", "dafook in output.split('\\n')])] if ('16s' in args): pheno = pheno[[c for c in", "pheno_g = pheno_nodic[[c for c in pheno_nodic.columns if c[:2]=='g_']] else: if 'include_allPNP' in", "rawDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/rawData' pheno_fn_bac =os.path.join(cleanDataPath,'noMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') #pheno_fn_bac =os.path.join(cleanDataPath,'allChipPhenotypes_nodfukimWith5PCair.phenotypes') pheno_fn_bacDic=os.path.join(cleanDataPath,'dicNoMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') pheno_fn_bacAllPNP=os.path.join(rawDataPath,'allPNPPhenotypes.phenotypes') iidsNoSharedEnv='/net/mraid08/export/genie/Microbiome/Analyses/PNPChip/cleanData/PNP_autosomal_clean2_nodfukim_NoCouples.txt' PNP_16S_DIR = '/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/16S' glycemicStatusPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/glycemic_status.csv' def", "is mutual exclusive with all_bac,s,g,f,o,c,p,otu' if 'include_allPNP' in args: assert 'dic' not in", "substitute', 'Cheesecake', 'Raisins', 'Chocolate', 'Quinoa', 'Cooked broccoli', 'Beef Cholent', 'Cracker', 'Chocolate Cookies', 'White", "c[:2]=='s_']] pheno_g = pheno_nodic[[c for c in pheno_nodic.columns if c[:2]=='g_']] else: if 'include_allPNP'", "mutually exclusive' if 'otu' in args: assert '16s' in args if 'dic' in", "in args: mb_columns_extra += ['Egg Recipes Freq', 'Egg, Hard Boiled or Soft Freq',", "if 'include_allPNP' not in args or ('PCs') in args: mb_columns += [c for", "','_') for val in mb_columns_extra] mb_columns+=mb_columns_extra if 'meals' in args: mealsColumns=[val.replace(' ','_') for", "'Peas', 'Pecan', 'Cooked cauliflower', 'Cooked Sweet potato', 'Butter', 'Omelette', 'Coated Wafers', 'Boiled corn',", "Including Light Freq', 'Thousand Island Dressing, Garlic Dressing Freq', 'Honey, Jam, fruit syrup,", "'Beef or Chicken Soup Freq', 'Internal Organs Freq', 'Fish Cooked, Baked or Grilled", "or 
Dark Chocolate Freq', 'Salty Snacks Freq', 'Cheese Cakes or Cream Cakes Freq',", "with all_bac,s,g,f,o,c,p,otu' if 'include_allPNP' in args: assert 'dic' not in args, 'include_allPNP does", "c[:2] not in ('s_', 'g_', 'f_', 'o_', 'c_', 'p_')]] for taxa_level in ['otu',", "Peppers', 'Egg', 'Pear', 'Peas', 'Pecan', 'Cooked cauliflower', 'Cooked Sweet potato', 'Butter', 'Omelette', 'Coated", "\\ 'taxa is mutual exclusive with all_bac,s,g,f,o,c,p,otu' if 'include_allPNP' in args: assert 'dic'", "'Chocolate cake', 'Diet Coke', 'Dried dates', 'Carrot Cake', 'Doritos', 'Israeli couscous', 'Pistachio', 'Date", "'o_', 'c_', 'p_')] elif kwargs['taxa'][1]=='_': kwargs['taxa']=[kwargs['taxa']] for taxa in kwargs['taxa']: taxadf=pheno.filter(regex=(taxa)) mb_columns +=", "Freq','Cooked Tomatoes, Tomato Sauce, Tomato Soup Freq', 'Red Pepper Freq', 'Green Pepper Freq',", "output = subprocess.getstatusoutput(\"cut -f 1 %s -d ' ' | cut -f 1", "Cookies', 'White beans', 'Cooked zucchini', 'Sweet potato', 'Wine', 'Cookies', 'Challah', 'Spelled', 'Honey', 'Green", "'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions'] drug_args=['D.lipid', 'D.All', 'D.Psychiatric', 'D.pain', 'D.CVD', 'D.GI','D.Thyroid', 'D.NSAID','D.Contraception'] meals=['Vodka or Arak', 'Avocado',", "fruit syrup, Maple syrup Freq', 'White or Brown Sugar Freq', 'Artificial Sweeteners Freq',]", "are mutually exclusive' if 'otu' in args: assert '16s' in args if 'dic'", "('s_', 'g_', 'f_', 'o_', 'c_', 'p_')] elif kwargs['taxa'][1]=='_': kwargs['taxa']=[kwargs['taxa']] for taxa in kwargs['taxa']:", "'threshold','taxa'] for arg in args: assert arg in known_args, 'unkown arg: %s'%(arg) for", "Juice Freq', 'Corn Freq', 'Parsley, Celery, Fennel, Dill, Cilantro, Green Onion Freq', 'Fresh", "[c for c in pheno.columns if c[:2]=='f_' ] if 'o' in args: mb_columns", "int(c) in pheno.index]] df_household = df_household[df_household.index.isin(pheno.index)] remove_inds = 
df_household.index[vc.VertexCut().work(df_household.values, 0.5)] pheno=pheno[~pheno.index.isin(remove_inds)] if 'keep_missingCovars'", "if 'glucose' in args: mb_columns += ['95P_Glucose','Glucose_Noise','Max_Glucose','Median_Glucose','WakeupGlucose', 'MeanGlucoseResponse','MeanBreadResponse','MeanBreadButterResponse'] if 'ffq' in args: mb_columns", "= alpha_diversity_s[alpha_diversity_s < 15].index pheno=pheno[~pheno.index.isin(sterile_individuals)] if 'keep_household' not in args: #noSharedEnvIID=pandas.read_csv(iidsNoSharedEnv,usecols=[0],header=None,sep='\\t') #pheno=pheno[pheno.index.isin(noSharedEnvIID[0].astype(int).values)] #new", "in args: s_stats=['BMI','Cholesterol,total','WakeupGlucose','Albumin','Creatinine','HbA1C%','Height','Hips','Waist','WHR','HDLCholesterol'] #, 'Triglycerides', 'LDLCholesterol'] mb_columns+=s_stats mb_columns=list(set(mb_columns)) if 'fid' in args: mb_columns", "Freq', 'Herbal Tea Freq', 'Green Tea Freq', 'Regular Tea Freq', 'Beer Freq', 'Sweet", "['Work activity','Physical activity - mins','Physical activity - freq'] if 'activityTypesFreq' in args: mb_columns_extra", "kwargs['taxa'][0]=='*': kwargs['taxa']=[initial+kwargs['taxa'][1:] for initial in ('s_', 'g_', 'f_', 'o_', 'c_', 'p_')] elif kwargs['taxa'][1]=='_':", "or Arak', 'Avocado', 'Parsley', 'Coated peanuts', 'Sugar', 'Smoked Salmon', 'Melon', 'Roll', 'Whipped cream',", "'unkown kwarg: %s'%(kwarg) if ('16s' in args): assert 'dic' not in args, '16s", "Carrot Juice Freq', 'Corn Freq', 'Parsley, Celery, Fennel, Dill, Cilantro, Green Onion Freq',", "'Date honey', 'Vinaigrette', 'Bamba', 'Dark Chocolate', 'Turkey Shawarma', 'Olive oil', #u'Parmesan\\xc2\\xa0cheese', 'Guacamole', 'Coleslaw',", "+= [c for c in pheno.columns if c[:2]=='s_' ] if 'g' in args:", "1 -d '_'\"%os.path.join(rawDataPath,'tmp','dfukim.txt')) pheno =pheno[~pheno.index.isin([int(dafook) for dafook in output.split('\\n')])] if ('16s' in args):", "'Pretzels', 'Kohlrabi', 'Eggplant Salad', 'Cooked green 
beans', 'Cooked mushrooms', 'Watermelon', 'Grilled cheese', 'Bissli',", "mb_columns_extra += ['Currently smokes','Ever smoked'] if 'sweets' in args: mb_columns_extra += ['Milk or", "'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions', 'genotek_only', 'swab_only'] ffq_args = ['activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions'] drug_args=['D.lipid', 'D.All', 'D.Psychiatric',", "or Chicken Soup Freq', 'Internal Organs Freq', 'Fish Cooked, Baked or Grilled Freq',", "with Sugar Freq', 'Decaffeinated Coffee Freq', 'Coffee Freq', 'Herbal Tea Freq', 'Green Tea", "Waffles or Biscuits Freq', 'Simple Cookies or Biscuits Freq', 'Ice Cream or Popsicle", "Dressing, Garlic Dressing Freq', 'Honey, Jam, fruit syrup, Maple syrup Freq', 'White or", "['Falafel in Pita Bread Freq', 'Cooked Legumes Freq', 'Processed Meat Free Products Freq']", "df_glucose.merge(pheno, left_index=True, right_index=True,how='right') mb_columns +=['median_Without_BMI_ALT_Overall','WakeupGlucose','BMI','VegeterianScale'] pheno.loc[pheno['VegeterianScale']<0, 'VegeterianScale']=np.nan if 'drugs' in args: mb_columns+=drug_args else:", "kind','Type 1 activity - freq','T2Activity kind', 'Type 2 activity - freq','T3Activity kind','Type 3", "Flakes Freq'] if 'delivery' in args: mb_columns_extra += ['C-Section','Home delivery','Was breastfed'] if 'dressSweetners'", "+= ['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] if 'include_allPNP' not in args or ('PCs') in args: mb_columns +=", "Beer', 'Cooked beets', 'Almonds', 'Falafel', 'Noodles', 'Jachnun', 'Turkey', 'Sushi', 'Brazil nuts', 'Orange', 'Rice',", "# 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', # 
'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', # 'RDW','Sodium','TSH','WBC','AlkalinePhosphatase','GGT','LDH','Iron','LDLCholesterol','Magnesium', # 'Triglycerides','TotalProtein','TotalBilirubin','Urea'] mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC',", "args: args=list(args)+['covars','blood','glucose','ffq','antropo'] mb_columns += ['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] if 'include_allPNP' not in args or ('PCs') in", "(pheno_s>pheno_s.min().min()).sum(axis=1) alpha_diversity_g = (pheno_g>pheno_g.min().min()).sum(axis=1) pheno.loc[pheno.Hips==-9, 'WHR'] = np.nan pheno.loc[pheno.Waist==-9, 'WHR'] = np.nan pheno['LDLCholesterol']", "'Kif Kef', 'Mustard', 'Coke', 'Vegetable Soup', 'Sausages', 'Pancake', 'Pasta', 'Sauteed vegetables', 'Plum', 'Goat", "'s_stats_pheno', 'fid', 'keep_household', 'no_log', 'keep_related', 'keep_sterile', '16s', '-9', 'covars_noPCs', 'PCs', 'lactose','include_allPNP','IsGenotek','permute','meals','other','drugs', 'calories','bloodType','questionnaires','keep_missingCovars','activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger',", "presence=presence[presence > len(presence)*ratio].index.values.tolist() pheno=pheno[other_columns+presence] if ('keep_related' not in args): #bed = Bed(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated'),", "'genotek_only' in args: pheno = pheno.loc[pheno['IsGenotek']==1] if 'swab_only' in args: pheno = pheno.loc[pheno['IsGenotek']==0]", "'blood', 'glucose', 'ffq', 'antropo', 's_stats_pheno', 'fid', 'keep_household', 'no_log', 'keep_related', 'keep_sterile', '16s', '-9', 'covars_noPCs',", "('taxa' in 
list(kwargs.keys())): assert len(set(['all_bac','s', 'g','f','o','c','p','otu']).intersection(set(args)))==0, \\ 'taxa is mutual exclusive with all_bac,s,g,f,o,c,p,otu'", "if ('taxa' in list(kwargs.keys())): assert len(set(['all_bac','s', 'g','f','o','c','p','otu']).intersection(set(args)))==0, \\ 'taxa is mutual exclusive with", "breastfeeding'] if 'other' in args: #AddingIrisGlucose df_glucose = pandas.read_csv(glycemicStatusPath).set_index('RegNum') pheno = df_glucose.merge(pheno, left_index=True,", "or Clementine Freq', 'Orange or Grapefruit Freq', 'Orange or Grapefruit Juice Freq', 'Apple", "Freq', 'Watermelon Freq', 'Dried Fruits Freq', 'Fruit Salad Freq'] if 'hunger' in args:", "'Beef', 'Meatballs', 'Hamburger sandwich', 'Chicken thighs', 'Granola', 'Beet', 'Couscous', 'Beet Salad', 'Chocolate Mousse", "'Calories_kcal','Carbs_g','Fat_g','Protain_g' #3 participant 86356,762339,805175 have no 'Age','Gender' # if set(['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'])<=set(pheno.columns.values): keep_inds=pheno.loc[:,['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek']].replace(-9, np.nan).dropna().index.values beforeNumParticpants=pheno.shape[0]", "cucumber', 'Soymilk', 'Dates', 'Croissant', 'Biscuit', 'Potato chips', 'White Cheese', 'French fries', 'Wholemeal Bread',", "as an addition for Salads or Stews Freq','Mayonnaise Including Light Freq', 'Thousand Island", "for c in pheno.columns if c[:2]=='g_' ] if 'f' in args: mb_columns +=", "if 'calories' not in args: features_to_drop +=['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] else: features_to_drop +=['Age','Gender'] if ('include_allPNP' not", "for arg in args: assert arg in known_args, 'unkown arg: %s'%(arg) for kwarg", "< 4].index else: sterile_individuals = alpha_diversity_s[alpha_diversity_s < 15].index pheno=pheno[~pheno.index.isin(sterile_individuals)] if 'keep_household' not in", "Vegetable Salads Freq', 'Pickled Vegetables 
Freq', 'Olives Freq'] if 'womenOnlyQuestions' in args: mb_columns_extra", "'Cooked cauliflower', 'Cooked Sweet potato', 'Butter', 'Omelette', 'Coated Wafers', 'Boiled corn', 'Chicken drumstick',", "freq'] if 'bloodType' in args: mb_columns_extra += ['Blood A','Blood B','Blood RH-'] if 'cereals'", "honey', 'Vinaigrette', 'Bamba', 'Dark Chocolate', 'Turkey Shawarma', 'Olive oil', #u'Parmesan\\xc2\\xa0cheese', 'Guacamole', 'Coleslaw', 'Americano',", "for c in pheno.columns if c[:2]=='p_' ] if 'otu' in args: mb_columns +=", "Freq', 'Sweet Dry Wine, Cocktails Freq', 'Alcoholic Drinks Freq'] if 'fruits' in args:", "+1e-5).astype(int).sum() presence=presence[presence > len(presence)*ratio].index.values.tolist() pheno=pheno[other_columns+presence] if ('keep_related' not in args): #bed = Bed(os.path.join(cleanDataPath,", "df_household = df_household[df_household.index.isin(pheno.index)] remove_inds = df_household.index[vc.VertexCut().work(df_household.values, 0.5)] pheno=pheno[~pheno.index.isin(remove_inds)] if 'keep_missingCovars' not in args:", "Pickled, Dried, Smoked, Canned Freq'] if 'pastry' in args: mb_columns_extra += ['Ordinary Bread", "Hamin, Cuba Freq', 'Mixed Chicken or Turkey Dishes Freq', 'Beef or Chicken Soup", "Freq', 'Coated or Stuffed Cookies, Waffles or Biscuits Freq', 'Simple Cookies or Biscuits", "['Stress','Sleep quality'] if 'smoking' in args: mb_columns_extra += ['Currently smokes','Ever smoked'] if 'sweets'", "'ratio' in kwargs: ratio=kwargs['ratio'] mb_columns = [c for c in pheno.columns if c[:2]", "Broccoli Freq','Sweet Potato Freq', 'Brussels Sprouts, Green or Red Cabbage Freq', 'Lettuce Freq','Carrots,", "to remove on the fly import ForPaper.VertexCut as vc df_household = pandas.read_csv(os.path.join(cleanDataPath, 'EnvironmentBlock.txt'),", "remove him\" features_to_drop=[] if ('IsGenotek' not in args) and ('covars' not in args)", "'Orange or Grapefruit Juice Freq', 'Apple Freq', 'Apricot Fresh or Dry, or Loquat", "Freq', 'Banana Freq', 'Melon 
Freq', 'Kiwi or Strawberries Freq', 'Mango Freq', 'Peach, Nectarine,", "Stews Freq','Mayonnaise Including Light Freq', 'Thousand Island Dressing, Garlic Dressing Freq', 'Honey, Jam,", "Rye Bread Freq', 'Baguette Freq', 'Roll or Bageles Freq', 'Pita Freq', 'Saltine Crackers", "'Mango Freq', 'Peach, Nectarine, Plum Freq', 'Pear Fresh, Cooked or Canned Freq','Persimmon Freq',", "'Chicken or Turkey With Skin Freq', 'Chicken or Turkey Without Skin Freq', 'Sausages", "args: mb_columns_extra += ['General Hunger','Morning Hunger', 'Midday Hunger', 'Evening Hunger'] if 'legumes' in", "Soda Freq', 'Regular Sodas with Sugar Freq', 'Decaffeinated Coffee Freq', 'Coffee Freq', 'Herbal", "'Cooked mushrooms', 'Watermelon', 'Grilled cheese', 'Bissli', 'Pullet', 'Hummus', 'Chinese Chicken Noodles', 'Shakshouka', 'Tahini',", "'all_bac', 's', 'g','f','o','c','p','otu', 'all_non_bac', 'covars', 'blood', 'glucose', 'ffq', 'antropo', 's_stats_pheno', 'fid', 'keep_household', 'no_log',", "Freq', 'Egg, Hard Boiled or Soft Freq', 'Schnitzel Turkey or Chicken Freq', 'Chicken", "PNP_16S_DIR = '/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/16S' glycemicStatusPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/glycemic_status.csv' def extract(*args,**kwargs): known_args = ['dic', 'all_bac', 's', 'g','f','o','c','p','otu', 'all_non_bac',", "c[:2]=='g_']] else: if 'include_allPNP' in args: pheno =pandas.read_csv(pheno_fn_bacAllPNP,sep='\\t') else: pheno =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno.set_index('IID', inplace=True,", "= np.nan pheno['LDLCholesterol'] = pheno['Cholesterol,total'] - pheno['HDLCholesterol'] - 2*pheno['Triglycerides'] if 'genotek_only' in args:", "[c for c in pheno.columns if c[:2]=='p_' ] if 'otu' in args: mb_columns", "'Sweet potato', 'Wine', 'Cookies', 'Challah', 'Spelled', 'Honey', 'Green beans', 'Milk', 'Peanut Butter', 'Cooked", "pheno['Cholesterol,total'] - pheno['HDLCholesterol'] - 2*pheno['Triglycerides'] if 'genotek_only' in args: pheno = 
pheno.loc[pheno['IsGenotek']==1] if", "'Brazil nuts', 'Orange', 'Rice', 'Diet Fruit Drink', 'Corn schnitzel', 'Cappuccino', 'Low fat Milk',", "'Pasta or Flakes Freq'] if 'delivery' in args: mb_columns_extra += ['C-Section','Home delivery','Was breastfed']", "mb_columns_extra += ['Is pregnant','Is breastfeeding','Is after birth', 'Taking contraceptives', 'Regular period', 'Irregular period',", "Dressing or Oil Freq', 'Avocado Freq','Lemon Freq', 'Onion Freq', 'Garlic Freq', 'Vegetable Soup", "kwarg in known_kwargs, 'unkown kwarg: %s'%(kwarg) if ('16s' in args): assert 'dic' not", "c in pheno: print c mb_columns=list(set(mb_columns)) pheno= pheno[mb_columns] if 'threshold' not in kwargs:", "Freq', 'Apple Freq', 'Apricot Fresh or Dry, or Loquat Freq', 'Grapes or Raisins", "in args: presence=((pheno[mb_columns]>threshold +1e-5)&(pheno[mb_columns]!=0)).astype(int).sum() else: presence=(pheno[mb_columns]>threshold +1e-5).astype(int).sum() presence=presence[presence > len(presence)*ratio].index.values.tolist() pheno=pheno[other_columns+presence] if ('keep_related'", "delim_whitespace=True, index_col=0, header=None) df_related=df_fam[~df_fam.index.isin(df_fam_no_related.index)] pheno=pheno[(~pheno.index.isin(df_related.index))] if ('keep_sterile') not in args: if '16s' in", "for participant in pheno[['Age','Gender']].index.values: # if np.isnan(pheno.loc[participant,'Calories_kcal']) or \\ # np.isnan(pheno.loc[participant,'Carbs_g']) or \\", "Freq', 'Fish Cooked, Baked or Grilled Freq', 'Fried Fish Freq', 'Canned Tuna or", "] if 'otu' in args: mb_columns += [c for c in pheno.columns if", "in args): #bed = Bed(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated'), count_A1=True)#.read() df_fam_no_related = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated.fam'), delim_whitespace=True, index_col=0,", "if 'all_non_bac' in args: args=list(args)+['covars','blood','glucose','ffq','antropo'] mb_columns += 
['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] if 'include_allPNP' not in args", "cut', 'Gilthead Bream', 'Garlic', 'Grapes', 'Chocolate Chip Cookies', 'Cucumber', 'Mung Bean', 'Ketchup', 'Sweet", "Freq', 'Jachnun, Mlawah, Kubana, Cigars Freq', 'Pizza Freq'] if 'qualityOfLiving' in args: mb_columns_extra", "Soup', 'Sausages', 'Pancake', 'Pasta', 'Sauteed vegetables', 'Plum', 'Goat Milk Yogurt', 'Orange juice', 'Potatoes',", "# print len(set(phenoChip.index)-set(phenoAll.index)) print(pheno.columns.values.tolist()) print(pheno.shape) sum=0 for participant in pheno[['Age','Gender']].index.values: # if np.isnan(pheno.loc[participant,'Calories_kcal'])", "['s_','g_','f_','o_','c_','p_']] if 'dic' in args: presence=((pheno[mb_columns]>threshold +1e-5)&(pheno[mb_columns]!=0)).astype(int).sum() else: presence=(pheno[mb_columns]>threshold +1e-5).astype(int).sum() presence=presence[presence > len(presence)*ratio].index.values.tolist()", "beans', 'Milk', 'Peanut Butter', 'Cooked carrots', 'Lemon', 'Salty Cookies', 'Beef', 'Meatballs', 'Hamburger sandwich',", "'Mandarin', 'Pesto', 'Sugar substitute', 'Cheesecake', 'Raisins', 'Chocolate', 'Quinoa', 'Cooked broccoli', 'Beef Cholent', 'Cracker',", "numpy as np import sys from pysnptools.snpreader.bed import Bed import subprocess cleanDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/cleanData/' rawDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/rawData'", "or Loquat Freq', 'Grapes or Raisins Freq', 'Banana Freq', 'Melon Freq', 'Kiwi or", "Freq', 'Beef or Chicken Soup Freq', 'Internal Organs Freq', 'Fish Cooked, Baked or", "args): features_to_drop += ['IsGenotek'] if ('covars' not in args) and ('covars_noPCs' not in", "\\ # np.isnan(pheno.loc[participant,'Carbs_g']) or \\ # np.isnan(pheno.loc[participant,'Fat_g']) or \\ # np.isnan(pheno.loc[participant,'Protain_g']): # sum+=1", "START##################### if 'questionnaires' in args: args=list(args)+ffq_args 
mb_columns_extra=[] if 'activity' in args: mb_columns_extra +=", "c in pheno: ### if (c[:2] not in ['c_', 'g_', 'o_', 's_', 'k_',", "'Goat Milk Yogurt', 'Orange juice', 'Potatoes', 'Halva', 'Yellow pepper', 'Mango', 'Lasagna', 'Popcorn', 'Hummus", "'Cooked beets', 'Almonds', 'Falafel', 'Noodles', 'Jachnun', 'Turkey', 'Sushi', 'Brazil nuts', 'Orange', 'Rice', 'Diet", "'Coconut milk', 'Pretzels', 'Kohlrabi', 'Eggplant Salad', 'Cooked green beans', 'Cooked mushrooms', 'Watermelon', 'Grilled", "if 'cereals' in args: mb_columns_extra += ['Cornflakes Freq','Granola or Bernflaks Freq','Cooked Cereal such", "'Bread', 'Onion', 'Cream Cheese', 'Chicken soup', 'Wholemeal Roll', 'Canned corn', 'Salty Cheese', 'Melawach',", "why we remove him\" features_to_drop=[] if ('IsGenotek' not in args) and ('covars' not", "in args: assert 'dic' not in args, 'include_allPNP does not support dicotomize bacteria'", "Freq', 'Pasta or Flakes Freq'] if 'delivery' in args: mb_columns_extra += ['C-Section','Home delivery','Was", "for initial in ('s_', 'g_', 'f_', 'o_', 'c_', 'p_')] elif kwargs['taxa'][1]=='_': kwargs['taxa']=[kwargs['taxa']] for", "Gum', 'Hamburger', 'Dark Beer', 'Cooked beets', 'Almonds', 'Falafel', 'Noodles', 'Jachnun', 'Turkey', 'Sushi', 'Brazil", "arg in args: mb_columns += [arg] mb_columns_extra=[val.replace(' ','_') for val in mb_columns_extra] mb_columns+=mb_columns_extra", "Freq', 'Wholemeal or Rye Bread Freq', 'Baguette Freq', 'Roll or Bageles Freq', 'Pita", "if ('keep_related' not in args): #bed = Bed(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated'), count_A1=True)#.read() df_fam_no_related = pandas.read_csv(os.path.join(cleanDataPath,", "=os.path.join(cleanDataPath,'allChipPhenotypes_nodfukimWith5PCair.phenotypes') pheno_fn_bacDic=os.path.join(cleanDataPath,'dicNoMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') pheno_fn_bacAllPNP=os.path.join(rawDataPath,'allPNPPhenotypes.phenotypes') 
iidsNoSharedEnv='/net/mraid08/export/genie/Microbiome/Analyses/PNPChip/cleanData/PNP_autosomal_clean2_nodfukim_NoCouples.txt' PNP_16S_DIR = '/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/16S' glycemicStatusPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/glycemic_status.csv' def extract(*args,**kwargs): known_args = ['dic',", "args: s_stats=['BMI','Cholesterol,total','WakeupGlucose','Albumin','Creatinine','HbA1C%','Height','Hips','Waist','WHR','HDLCholesterol'] #, 'Triglycerides', 'LDLCholesterol'] mb_columns+=s_stats mb_columns=list(set(mb_columns)) if 'fid' in args: mb_columns =", "breastfeeding','Is after birth', 'Taking contraceptives', 'Regular period', 'Irregular period', 'No period','Hormonal replacment', 'Past", "['Cornflakes Freq','Granola or Bernflaks Freq','Cooked Cereal such as Oatmeal Porridge Freq', 'Rice Freq','Couscous,", "args: mb_columns_extra += ['T1Activity kind','Type 1 activity - freq','T2Activity kind', 'Type 2 activity", "Freq', 'Beer Freq', 'Sweet Dry Wine, Cocktails Freq', 'Alcoholic Drinks Freq'] if 'fruits'", "Grilled Freq', 'Fried Fish Freq', 'Canned Tuna or Tuna Salad Freq', 'Fish (not", "'vegetables' in args: mb_columns_extra += ['Tomato Freq','Cooked Tomatoes, Tomato Sauce, Tomato Soup Freq',", "'activity' in args: mb_columns_extra += ['Work activity','Physical activity - mins','Physical activity - freq']", "Bolognese', 'Chicken Meatballs', 'Burekas', 'Carrots', 'Tofu', 'Wholemeal Pita', 'Sunflower seeds', 'Coriander', 'Ciabatta', 'Tomato", "args=list(args)+ffq_args mb_columns_extra=[] if 'activity' in args: mb_columns_extra += ['Work activity','Physical activity - mins','Physical", "pheno = pheno.loc[pheno['IsGenotek']==0] mb_columns = [] if 'taxa' in kwargs: if kwargs['taxa'][0]=='*': kwargs['taxa']=[initial+kwargs['taxa'][1:]", "'Zucchini or Eggplant Freq','Peas, Green Beans or Okra Cooked Freq', 'Cauliflower or Broccoli", "pheno.columns if c[:2]=='f_' ] if 'o' in args: mb_columns += [c for c", "Freq','Sweet 
Potato Freq', 'Brussels Sprouts, Green or Red Cabbage Freq', 'Lettuce Freq','Carrots, Fresh", "or Brown Sugar Freq', 'Artificial Sweeteners Freq',] if 'drinks' in args: mb_columns_extra +=", "if arg in args: mb_columns += [arg] mb_columns_extra=[val.replace(' ','_') for val in mb_columns_extra]", "'Apple', 'Lettuce Salad', 'Cereals', 'Yellow Cheese', 'Tea', 'Beer', 'Mozzarella Cheese', 'Fried onions', 'Ice", "pheno.loc[pheno.Hips==-9, 'WHR'] = np.nan pheno.loc[pheno.Waist==-9, 'WHR'] = np.nan pheno['LDLCholesterol'] = pheno['Cholesterol,total'] - pheno['HDLCholesterol']", "'Cooked Legumes Freq', 'Processed Meat Free Products Freq'] if 'meatProducts' in args: mb_columns_extra", "c[:2]=='PC'] if 'lactose' in args: mb_columns += ['lactose'] if 'blood' in args: #", "# 'RDW','Sodium','TSH','WBC','AlkalinePhosphatase','GGT','LDH','Iron','LDLCholesterol','Magnesium', # 'Triglycerides','TotalProtein','TotalBilirubin','Urea'] mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', 'RDW','Sodium','TSH','WBC','LDLCholesterol'] if 'glucose' in", "that decides which individuals to remove on the fly import ForPaper.VertexCut as vc", "'Sauteed vegetables', 'Plum', 'Goat Milk Yogurt', 'Orange juice', 'Potatoes', 'Halva', 'Yellow pepper', 'Mango',", "'dressSweetners' in args: mb_columns_extra += ['Oil as an addition for Salads or Stews", "ForPaper.VertexCut as vc df_household = pandas.read_csv(os.path.join(cleanDataPath, 'EnvironmentBlock.txt'), delim_whitespace=True) df_household = df_household[[c for c", "Freq', 'Artificial Sweeteners Freq',] if 'drinks' in args: mb_columns_extra += ['Nectar, Cider Freq',", "from pysnptools.snpreader.bed import Bed import subprocess 
cleanDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/cleanData/' rawDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/rawData' pheno_fn_bac =os.path.join(cleanDataPath,'noMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') #pheno_fn_bac =os.path.join(cleanDataPath,'allChipPhenotypes_nodfukimWith5PCair.phenotypes') pheno_fn_bacDic=os.path.join(cleanDataPath,'dicNoMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes')", "'Peanut Butter', 'Cooked carrots', 'Lemon', 'Salty Cookies', 'Beef', 'Meatballs', 'Hamburger sandwich', 'Chicken thighs',", "Salads or Stews Freq','Mayonnaise Including Light Freq', 'Thousand Island Dressing, Garlic Dressing Freq',", "Cereal such as Oatmeal Porridge Freq', 'Rice Freq','Couscous, Burgul, Mamaliga, Groats Freq', 'Potatoes", "'dic' in args: presence=((pheno[mb_columns]>threshold +1e-5)&(pheno[mb_columns]!=0)).astype(int).sum() else: presence=(pheno[mb_columns]>threshold +1e-5).astype(int).sum() presence=presence[presence > len(presence)*ratio].index.values.tolist() pheno=pheno[other_columns+presence] if", "'Cooked broccoli', 'Beef Cholent', 'Cracker', 'Chocolate Cookies', 'White beans', 'Cooked zucchini', 'Sweet potato',", "pheno_s = pheno[[c for c in pheno.columns if c[:2]=='s_']] pheno_g = pheno[[c for", "+= ['Stress','Sleep quality'] if 'smoking' in args: mb_columns_extra += ['Currently smokes','Ever smoked'] if", "= ['FID']+mb_columns ########################FFQ START##################### if 'questionnaires' in args: args=list(args)+ffq_args mb_columns_extra=[] if 'activity' in", "Meatballs', 'Burekas', 'Carrots', 'Tofu', 'Wholemeal Pita', 'Sunflower seeds', 'Coriander', 'Ciabatta', 'Tomato sauce', 'Heavy", "freq'] if 'activityTypesFreq' in args: mb_columns_extra += ['T1Activity kind','Type 1 activity - freq','T2Activity", "or Okra Cooked Freq', 'Cauliflower or Broccoli Freq','Sweet Potato Freq', 'Brussels Sprouts, Green", "in args: mb_columns += [arg] mb_columns_extra=[val.replace(' ','_') for 
val in mb_columns_extra] mb_columns+=mb_columns_extra if", "'Fried eggplant', 'Salmon', 'Cashew', 'Jewish donut', 'Rugelach', 'Cake', 'Ravioli', 'Tomatoes', 'Wholemeal Light Bread',", "Chicken Noodles', 'Shakshouka', 'Tahini', 'Chicken breast', 'Steak', 'Light Bread', 'Wholemeal Crackers', 'Sugar Free", "in args: # mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', # 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', # 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', # 'RDW','Sodium','TSH','WBC','AlkalinePhosphatase','GGT','LDH','Iron','LDLCholesterol','Magnesium', #", "c in pheno_nodic.columns if c[:2]=='g_']] else: if 'include_allPNP' in args: pheno =pandas.read_csv(pheno_fn_bacAllPNP,sep='\\t') else:", "in pheno.columns if c[:2]=='o_' ] if 'c' in args: mb_columns += [c for", "mb_columns += [c for c in pheno.columns if c[:2]=='o_' ] if 'c' in", "df_taxa = np.log10(df_taxa) pheno = pheno.merge(df_taxa, left_index=True, right_index=True) pheno_s = pheno[[c for c", "phenoAll = extract('s','include_allPNP','covars')#'include_allPNP','keep_household','ffq','keep_related')#'include_allPNP', print(phenoAll.shape) print(phenoAll.columns) phenoAll = extract('s','include_allPNP') print(phenoAll.shape) print(phenoAll.columns) phenoChip = extract('keep_household','s','keep_related')", "+= [c for c in pheno.columns if c[:2]=='c_' ] if 'p' in args:", "Freq', 'Avocado Freq','Lemon Freq', 'Onion Freq', 'Garlic Freq', 'Vegetable Soup Freq', 'Hummus Salad", "'Brown Sugar', 'Peanuts', 'Mayonnaise', 'Persimmon', 'Apple juice', 'Stuffed Peppers', 'Egg', 'Pear', 'Peas', 'Pecan',", "mb_columns_extra += ['Mandarin or Clementine Freq', 'Orange or Grapefruit Freq', 'Orange or Grapefruit", "pheno =pandas.read_csv(pheno_fn_bacAllPNP,sep='\\t') else: pheno =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno.set_index('IID', inplace=True, 
drop=True) if 'include_allPNP'in args: status, output", "Wafers', 'Boiled corn', 'Chicken drumstick', 'Pita', 'Pasta Bolognese', 'Chicken Meatballs', 'Burekas', 'Carrots', 'Tofu',", "for c in pheno.columns if c[:4]=='OTU_' ] if 'no_log' in args: assert 'dic'", "Freq', 'Baguette Freq', 'Roll or Bageles Freq', 'Pita Freq', 'Saltine Crackers or Matzah", "Cheese', 'Fried onions', 'Ice cream', 'Cream Cake', 'Green cabbage', 'Olives', 'Balsamic vinegar', 'Peach',", "'smoking','sweets','vegetables','womenOnlyQuestions'] drug_args=['D.lipid', 'D.All', 'D.Psychiatric', 'D.pain', 'D.CVD', 'D.GI','D.Thyroid', 'D.NSAID','D.Contraception'] meals=['Vodka or Arak', 'Avocado', 'Parsley',", "Pita Bread Freq', 'Falafel in Pita version 2 Freq','Processed Meat Products Freq','Beef, Veal,", "mb_columns += mealsColumns ########################FFQ END##################### #for c in pheno: print c mb_columns=list(set(mb_columns)) pheno=", "cauliflower', 'Cooked Sweet potato', 'Butter', 'Omelette', 'Coated Wafers', 'Boiled corn', 'Chicken drumstick', 'Pita',", "'Cream Cake', 'Green cabbage', 'Olives', 'Balsamic vinegar', 'Peach', 'Light Yellow Cheese', 'Red pepper',", "'Parsley', 'Coated peanuts', 'Sugar', 'Smoked Salmon', 'Melon', 'Roll', 'Whipped cream', 'Coconut milk', 'Pretzels',", "or Biscuits Freq', 'Ice Cream or Popsicle which contains Dairy Freq', 'Popsicle Without", "in args) and ('covars_noPCs' not in args): features_to_drop += ['IsGenotek'] if ('covars' not", "'Coated or Stuffed Cookies, Waffles or Biscuits Freq', 'Simple Cookies or Biscuits Freq',", "+= ['lactose'] if 'blood' in args: # mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', # 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', #", "Cider Freq', 'Diet Juice Freq', 'Juice Freq', 'Diet Soda Freq', 'Regular Sodas with", "dic are mutually exclusive' if ('taxa' in list(kwargs.keys())): assert len(set(['all_bac','s', 
'g','f','o','c','p','otu']).intersection(set(args)))==0, \\ 'taxa", "'drinks' in args: mb_columns_extra += ['Nectar, Cider Freq', 'Diet Juice Freq', 'Juice Freq',", "'Brussels Sprouts, Green or Red Cabbage Freq', 'Lettuce Freq','Carrots, Fresh or Cooked, Carrot", "args: mb_columns += ['Alcoholic_Drinks_Freq','Cigarretes_per_day','Coffee_Freq','Start_smoking_age'] if 'antropo' in args: mb_columns += ['BMI','BPdia','BPsys','HeartRate','Height','Hips','WHR','Waist'] if 's_stats_pheno'", "or Popsicle which contains Dairy Freq', 'Popsicle Without Dairy Freq', 'Black or White", "'Hummus Salad Freq', 'Tahini Salad Freq', 'Cooked Vegetable Salads Freq', 'Pickled Vegetables Freq',", "Potatoes Salad Freq', 'Fries Freq', 'Pasta or Flakes Freq'] if 'delivery' in args:", "'Sugar Free Gum', 'Hamburger', 'Dark Beer', 'Cooked beets', 'Almonds', 'Falafel', 'Noodles', 'Jachnun', 'Turkey',", "ffq_args = ['activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions'] drug_args=['D.lipid', 'D.All', 'D.Psychiatric', 'D.pain', 'D.CVD', 'D.GI','D.Thyroid', 'D.NSAID','D.Contraception']", "Drinks Freq'] if 'fruits' in args: mb_columns_extra += ['Mandarin or Clementine Freq', 'Orange", "assert 'dic' not in args, 'dic and no_log are mutually exclusive' pheno[mb_columns] =", "Rogallach, Croissant or Donut Freq', 'Cake, Torte Cakes, Chocolate Cake Freq', 'Fruit Pie", "pheno['LDLCholesterol'] = pheno['Cholesterol,total'] - pheno['HDLCholesterol'] - 2*pheno['Triglycerides'] if 'genotek_only' in args: pheno =", "'Chocolate Chip Cookies', 'Cucumber', 'Mung Bean', 'Ketchup', 'Sweet Yogurt', 'Bread', 'Onion', 'Cream Cheese',", "c in pheno.columns if c[:2]=='f_' ] if 'o' in args: mb_columns += [c", "Recipes Freq', 'Egg, Hard Boiled or Soft Freq', 'Schnitzel Turkey or Chicken Freq',", "Freq', 'Fresh Vegetable Salad With Dressing or Oil Freq', 'Avocado 
Freq','Lemon Freq', 'Onion", "args: mb_columns += [c for c in pheno.columns if c[:2]=='s_' ] if 'g'", "'Tomato sauce', 'Heavy cream', 'Banana', 'Kif Kef', 'Mustard', 'Coke', 'Vegetable Soup', 'Sausages', 'Pancake',", "With Dressing or Oil Freq', 'Avocado Freq','Lemon Freq', 'Onion Freq', 'Garlic Freq', 'Vegetable", "'PNP_autosomal_clean2_nodfukim_norelated.fam'), delim_whitespace=True, index_col=0, header=None) df_fam = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim.fam'), delim_whitespace=True, index_col=0, header=None) df_related=df_fam[~df_fam.index.isin(df_fam_no_related.index)] pheno=pheno[(~pheno.index.isin(df_related.index))]", "args: assert arg in known_args, 'unkown arg: %s'%(arg) for kwarg in list(kwargs.keys()): assert", "in mb_columns_extra] mb_columns+=mb_columns_extra if 'meals' in args: mealsColumns=[val.replace(' ','_') for val in meals]", "'covars' not in args, 'IsGenotek and covars are mutually exclusive' if 'otu' in", "in pheno.columns if c[:2]=='g_' ] if 'f' in args: mb_columns += [c for", "'Ketchup', 'Sweet Yogurt', 'Bread', 'Onion', 'Cream Cheese', 'Chicken soup', 'Wholemeal Roll', 'Canned corn',", "'Yellow Cheese', 'Tea', 'Beer', 'Mozzarella Cheese', 'Fried onions', 'Ice cream', 'Cream Cake', 'Green", "'Pizza', 'Fried cauliflower', 'Roasted eggplant', 'Baguette', 'Lentil Soup', 'Tzfatit Cheese', 'Nectarine', 'Chicken legs',", "if c[:2] not in ['s_','g_','f_','o_','c_','p_']] if 'dic' in args: presence=((pheno[mb_columns]>threshold +1e-5)&(pheno[mb_columns]!=0)).astype(int).sum() else: presence=(pheno[mb_columns]>threshold", "Sweeteners Freq',] if 'drinks' in args: mb_columns_extra += ['Nectar, Cider Freq', 'Diet Juice", "if np.isnan(pheno.loc[participant,'Calories_kcal']) or \\ # np.isnan(pheno.loc[participant,'Carbs_g']) or \\ # np.isnan(pheno.loc[participant,'Fat_g']) or \\ #", "not in args) and ('PCs' not in args): features_to_drop+=['PC1','PC2','PC3','PC4','PC5'] pheno=pheno.drop(features_to_drop,axis=1) if 
('-9' not", "'Evening Hunger'] if 'legumes' in args: mb_columns_extra += ['Falafel in Pita Bread Freq',", "- mins','Physical activity - freq'] if 'activityTypesFreq' in args: mb_columns_extra += ['T1Activity kind','Type", "Freq', 'Rice Freq','Couscous, Burgul, Mamaliga, Groats Freq', 'Potatoes Boiled, Baked, Mashed, Potatoes Salad", "mb_columns=list(set(mb_columns)) pheno= pheno[mb_columns] if 'threshold' not in kwargs: threshold = -4 else: threshold=kwargs['threshold']", "Freq', 'Fruit Salad Freq'] if 'hunger' in args: mb_columns_extra += ['General Hunger','Morning Hunger',", "mb_columns += ['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] if 'include_allPNP' not in args or ('PCs') in args: mb_columns", "args: mb_columns_extra += ['Cornflakes Freq','Granola or Bernflaks Freq','Cooked Cereal such as Oatmeal Porridge", "('keep_related' not in args): #bed = Bed(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated'), count_A1=True)#.read() df_fam_no_related = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated.fam'),", "in args: mb_columns_extra += ['C-Section','Home delivery','Was breastfed'] if 'dressSweetners' in args: mb_columns_extra +=", "np.nan).dropna().index.values beforeNumParticpants=pheno.shape[0] pheno=pheno.loc[keep_inds] afterNumParticpants=pheno.shape[0] if beforeNumParticpants-afterNumParticpants>0: pass #print \"Removing participants with missing covars!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\"", "mb_columns+=drug_args else: for arg in drug_args: if arg in args: mb_columns += [arg]", "vinegar', 'Peach', 'Light Yellow Cheese', 'Red pepper', 'Bagel', 'Entrecote', 'Cottage cheese', 'Oil', 'Natural", "Bread', 'Marble Cake', 'Brown Rice', 'Cold cut', 'Gilthead Bream', 'Garlic', 'Grapes', 'Chocolate Chip", "in pheno if c[:2] not in ('s_', 'g_', 'f_', 'o_', 'c_', 'p_')]] for", "'o_', 'c_', 'p_')]] for taxa_level in ['otu', 'species', 'genus', 'family', 'order', 'class', 'phylum']:", 
"'Internal Organs Freq', 'Fish Cooked, Baked or Grilled Freq', 'Fried Fish Freq', 'Canned", "or Oil Freq', 'Fresh Vegetable Salad With Dressing or Oil Freq', 'Avocado Freq','Lemon", "arg in known_args, 'unkown arg: %s'%(arg) for kwarg in list(kwargs.keys()): assert kwarg in", "Salad Freq'] if 'hunger' in args: mb_columns_extra += ['General Hunger','Morning Hunger', 'Midday Hunger',", "pheno if c[:2] not in ('s_', 'g_', 'f_', 'o_', 'c_', 'p_')]] for taxa_level", "Shawarma', 'Olive oil', #u'Parmesan\\xc2\\xa0cheese', 'Guacamole', 'Coleslaw', 'Americano', 'Pesek Zman snack', 'Green onions', 'Mushrooms',", "phenoChip = extract('keep_household','s','keep_related') print(phenoChip.shape) print(phenoChip.columns) # print \"Only in chip:\" # print set(phenoChip.index.values)-set(phenoAll.index.values)", "if int(c) in pheno.index]] df_household = df_household[df_household.index.isin(pheno.index)] remove_inds = df_household.index[vc.VertexCut().work(df_household.values, 0.5)] pheno=pheno[~pheno.index.isin(remove_inds)] if", "'meals' in args: mealsColumns=[val.replace(' ','_') for val in meals] #Correct by total calories", "Bed import subprocess cleanDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/cleanData/' rawDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/rawData' pheno_fn_bac =os.path.join(cleanDataPath,'noMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') #pheno_fn_bac =os.path.join(cleanDataPath,'allChipPhenotypes_nodfukimWith5PCair.phenotypes') pheno_fn_bacDic=os.path.join(cleanDataPath,'dicNoMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') pheno_fn_bacAllPNP=os.path.join(rawDataPath,'allPNPPhenotypes.phenotypes') iidsNoSharedEnv='/net/mraid08/export/genie/Microbiome/Analyses/PNPChip/cleanData/PNP_autosomal_clean2_nodfukim_NoCouples.txt' PNP_16S_DIR", "cake', 'Apple', 'Lettuce Salad', 'Cereals', 'Yellow Cheese', 'Tea', 'Beer', 'Mozzarella Cheese', 'Fried onions',", "Freq', 'Parsley, Celery, Fennel, Dill, Cilantro, Green 
Onion Freq', 'Fresh Vegetable Salad Without", "Dishes as Moussaka, Hamin, Cuba Freq', 'Mixed Chicken or Turkey Dishes Freq', 'Beef", "print(phenoAll.columns) phenoChip = extract('keep_household','s','keep_related') print(phenoChip.shape) print(phenoChip.columns) # print \"Only in chip:\" # print", "for c in pheno.columns if c[:2] not in ['s_','g_','f_','o_','c_','p_']] if 'dic' in args:", "# np.isnan(pheno.loc[participant,'Carbs_g']) or \\ # np.isnan(pheno.loc[participant,'Fat_g']) or \\ # np.isnan(pheno.loc[participant,'Protain_g']): # sum+=1 #", "in args: args=list(args)+ffq_args mb_columns_extra=[] if 'activity' in args: mb_columns_extra += ['Work activity','Physical activity", "if c[:2]=='s_' ] if 'g' in args: mb_columns += [c for c in", "Legumes Freq', 'Processed Meat Free Products Freq'] if 'meatProducts' in args: mb_columns_extra +=", "'Lettuce Salad', 'Cereals', 'Yellow Cheese', 'Tea', 'Beer', 'Mozzarella Cheese', 'Fried onions', 'Ice cream',", "if 'delivery' in args: mb_columns_extra += ['C-Section','Home delivery','Was breastfed'] if 'dressSweetners' in args:", "= pheno.loc[pheno['IsGenotek']==1] if 'swab_only' in args: pheno = pheno.loc[pheno['IsGenotek']==0] mb_columns = [] if", "smoked'] if 'sweets' in args: mb_columns_extra += ['Milk or Dark Chocolate Freq', 'Salty", "afterNumParticpants=pheno.shape[0] if beforeNumParticpants-afterNumParticpants>0: pass #print \"Removing participants with missing covars!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\" #print beforeNumParticpants-afterNumParticpants #print", "%s -d ' ' | cut -f 1 -d '_'\"%os.path.join(rawDataPath,'tmp','dfukim.txt')) pheno =pheno[~pheno.index.isin([int(dafook) for", "'swab_only'] ffq_args = ['activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions'] drug_args=['D.lipid', 'D.All', 'D.Psychiatric', 'D.pain', 'D.CVD', 'D.GI','D.Thyroid',", "sauce', 
'Heavy cream', 'Banana', 'Kif Kef', 'Mustard', 'Coke', 'Vegetable Soup', 'Sausages', 'Pancake', 'Pasta',", "'no_log' in args: assert 'dic' not in args, 'dic and no_log are mutually", "milk', 'Pretzels', 'Kohlrabi', 'Eggplant Salad', 'Cooked green beans', 'Cooked mushrooms', 'Watermelon', 'Grilled cheese',", "['Currently smokes','Ever smoked'] if 'sweets' in args: mb_columns_extra += ['Milk or Dark Chocolate", "Baked or Grilled Freq', 'Fried Fish Freq', 'Canned Tuna or Tuna Salad Freq',", "Dry, or Loquat Freq', 'Grapes or Raisins Freq', 'Banana Freq', 'Melon Freq', 'Kiwi", "in args: mb_columns_extra += ['Currently smokes','Ever smoked'] if 'sweets' in args: mb_columns_extra +=", "= extract('keep_household','s','keep_related') print(phenoChip.shape) print(phenoChip.columns) # print \"Only in chip:\" # print set(phenoChip.index.values)-set(phenoAll.index.values) #", "\\ # np.isnan(pheno.loc[participant,'Fat_g']) or \\ # np.isnan(pheno.loc[participant,'Protain_g']): # sum+=1 # print participant #", "'Mozzarella Cheese', 'Fried onions', 'Ice cream', 'Cream Cake', 'Green cabbage', 'Olives', 'Balsamic vinegar',", "sauce', 'Strawberry', 'Pastrami', 'Lemonade', 'Pasta with tomato sauce', 'Chicken']#removed: u'Soda water',u'Water', u'Salt', known_args+=", "A','Blood B','Blood RH-'] if 'cereals' in args: mb_columns_extra += ['Cornflakes Freq','Granola or Bernflaks", "drug_args=['D.lipid', 'D.All', 'D.Psychiatric', 'D.pain', 'D.CVD', 'D.GI','D.Thyroid', 'D.NSAID','D.Contraception'] meals=['Vodka or Arak', 'Avocado', 'Parsley', 'Coated", "< 15].index pheno=pheno[~pheno.index.isin(sterile_individuals)] if 'keep_household' not in args: #noSharedEnvIID=pandas.read_csv(iidsNoSharedEnv,usecols=[0],header=None,sep='\\t') #pheno=pheno[pheno.index.isin(noSharedEnvIID[0].astype(int).values)] #new code that", "'Vegetable Soup Freq', 'Hummus Salad Freq', 'Tahini Salad Freq', 'Cooked Vegetable Salads Freq',", "sum+=1 # print participant # print 
pheno.loc[participant,['Calories_kcal','Carbs_g','Fat_g','Protain_g','Protain_g']] # print sum if np.isnan(pheno.loc[participant,'Age']) or", "in kwargs: ratio=kwargs['ratio'] mb_columns = [c for c in pheno.columns if c[:2] in", "+= ['IsGenotek'] if ('covars' not in args) and ('covars_noPCs' not in args) and", "c in pheno.columns if c[:2]=='s_']] pheno_g = pheno[[c for c in pheno.columns if", "'Cooked zucchini', 'Sweet potato', 'Wine', 'Cookies', 'Challah', 'Spelled', 'Honey', 'Green beans', 'Milk', 'Peanut", "'Gilthead Bream', 'Garlic', 'Grapes', 'Chocolate Chip Cookies', 'Cucumber', 'Mung Bean', 'Ketchup', 'Sweet Yogurt',", "s_stats=['BMI','Cholesterol,total','WakeupGlucose','Albumin','Creatinine','HbA1C%','Height','Hips','Waist','WHR','HDLCholesterol'] #, 'Triglycerides', 'LDLCholesterol'] mb_columns+=s_stats mb_columns=list(set(mb_columns)) if 'fid' in args: mb_columns = ['FID']+mb_columns", "else: sterile_individuals = alpha_diversity_s[alpha_diversity_s < 15].index pheno=pheno[~pheno.index.isin(sterile_individuals)] if 'keep_household' not in args: #noSharedEnvIID=pandas.read_csv(iidsNoSharedEnv,usecols=[0],header=None,sep='\\t')", "Freq', 'Schnitzel Turkey or Chicken Freq', 'Chicken or Turkey With Skin Freq', 'Chicken", "Bed(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated'), count_A1=True)#.read() df_fam_no_related = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated.fam'), delim_whitespace=True, index_col=0, header=None) df_fam = pandas.read_csv(os.path.join(cleanDataPath,", "('-9' not in args): pheno.replace(-9, np.nan, inplace=True) if 'permute' in args: pheno=pandas.DataFrame(pheno.values[np.random.permutation(pheno.shape[0])],index=pheno.index,columns=pheno.columns) return", "Hunger', 'Evening Hunger'] if 'legumes' in args: mb_columns_extra += ['Falafel in Pita Bread", "Mlawah, Kubana, Cigars Freq', 'Pizza Freq'] if 'qualityOfLiving' in args: mb_columns_extra += ['Stress','Sleep", "beans', 'Cooked 
mushrooms', 'Watermelon', 'Grilled cheese', 'Bissli', 'Pullet', 'Hummus', 'Chinese Chicken Noodles', 'Shakshouka',", "legs', 'Nuts', 'Goat Cheese', 'Jam', 'Feta Cheese', 'Mandarin', 'Pesto', 'Sugar substitute', 'Cheesecake', 'Raisins',", "args) and ('covars_noPCs' not in args) and ('other' not in args): if 'calories'", "'otu' in args: assert '16s' in args if 'dic' in args: pheno =pandas.read_csv(pheno_fn_bacDic,sep='\\t')", "Freq', 'Pizza Freq'] if 'qualityOfLiving' in args: mb_columns_extra += ['Stress','Sleep quality'] if 'smoking'", "'Mung Bean', 'Ketchup', 'Sweet Yogurt', 'Bread', 'Onion', 'Cream Cheese', 'Chicken soup', 'Wholemeal Roll',", "Raisins Freq', 'Banana Freq', 'Melon Freq', 'Kiwi or Strawberries Freq', 'Mango Freq', 'Peach,", "'Cauliflower or Broccoli Freq','Sweet Potato Freq', 'Brussels Sprouts, Green or Red Cabbage Freq',", "- freq','T2Activity kind', 'Type 2 activity - freq','T3Activity kind','Type 3 activity - freq']", "sandwich', 'Chicken thighs', 'Granola', 'Beet', 'Couscous', 'Beet Salad', 'Chocolate Mousse Cake', 'Sweet Roll',", "'c_', 'p_')] elif kwargs['taxa'][1]=='_': kwargs['taxa']=[kwargs['taxa']] for taxa in kwargs['taxa']: taxadf=pheno.filter(regex=(taxa)) mb_columns += taxadf.columns.values.tolist()", "#, 'Triglycerides', 'LDLCholesterol'] mb_columns+=s_stats mb_columns=list(set(mb_columns)) if 'fid' in args: mb_columns = ['FID']+mb_columns ########################FFQ", "if 'include_allPNP' in args: pheno =pandas.read_csv(pheno_fn_bacAllPNP,sep='\\t') else: pheno =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno.set_index('IID', inplace=True, drop=True) if", "output.split('\\n')])] if ('16s' in args): pheno = pheno[[c for c in pheno if", "'fid' in args: mb_columns = ['FID']+mb_columns ########################FFQ START##################### if 'questionnaires' in args: args=list(args)+ffq_args", "'Triglycerides','TotalProtein','TotalBilirubin','Urea'] mb_columns += 
['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', 'RDW','Sodium','TSH','WBC','LDLCholesterol'] if 'glucose' in args: mb_columns +=", "pepper', 'Bagel', 'Entrecote', 'Cottage cheese', 'Oil', 'Natural Yogurt', 'Walnuts', 'Edamame', 'Majadra', 'Oatmeal', 'Soy", "if 'permute' in args: pheno=pandas.DataFrame(pheno.values[np.random.permutation(pheno.shape[0])],index=pheno.index,columns=pheno.columns) return pheno if __name__==\"__main__\": # pheno=extract('dic','covars','keep_household',\"pastry\",ratio=0.2)#'all_bac' phenoAll =", "mb_columns += [c for c in pheno.columns if c[:2]=='g_' ] if 'f' in", "'include_allPNP'in args: status, output = subprocess.getstatusoutput(\"cut -f 1 %s -d ' ' |", "if 'lactose' in args: mb_columns += ['lactose'] if 'blood' in args: # mb_columns", "by total calories pheno.loc[:,mealsColumns]=pheno[mealsColumns][pheno[mealsColumns]!=-9].div(pheno['Calories_kcal_Total'].values,axis=0) pheno.replace(np.nan, 0,inplace=True) mb_columns += mealsColumns ########################FFQ END##################### #for c", "c in pheno.columns if c[:2] not in ['s_','g_','f_','o_','c_','p_']] if 'dic' in args: presence=((pheno[mb_columns]>threshold", "np.nan pheno.loc[pheno.Waist==-9, 'WHR'] = np.nan pheno['LDLCholesterol'] = pheno['Cholesterol,total'] - pheno['HDLCholesterol'] - 2*pheno['Triglycerides'] if", "'keep_household' not in args: #noSharedEnvIID=pandas.read_csv(iidsNoSharedEnv,usecols=[0],header=None,sep='\\t') #pheno=pheno[pheno.index.isin(noSharedEnvIID[0].astype(int).values)] #new code that decides which individuals to", "or Challah Freq', 'Light Bread Freq', 'Wholemeal or Rye Bread Freq', 'Baguette Freq',", "Freq', 'Dried Fruits Freq', 'Fruit Salad Freq'] if 'hunger' in args: mb_columns_extra +=", "for c in 
pheno.columns if c[:2]=='PC'] if 'lactose' in args: mb_columns += ['lactose']", "'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', 'RDW','Sodium','TSH','WBC','LDLCholesterol'] if 'glucose' in args: mb_columns += ['95P_Glucose','Glucose_Noise','Max_Glucose','Median_Glucose','WakeupGlucose', 'MeanGlucoseResponse','MeanBreadResponse','MeanBreadButterResponse'] if 'ffq'", "Cigars Freq', 'Pizza Freq'] if 'qualityOfLiving' in args: mb_columns_extra += ['Stress','Sleep quality'] if", "Cholent', 'Cracker', 'Chocolate Cookies', 'White beans', 'Cooked zucchini', 'Sweet potato', 'Wine', 'Cookies', 'Challah',", "['Oil as an addition for Salads or Stews Freq','Mayonnaise Including Light Freq', 'Thousand", "'Green cabbage', 'Olives', 'Balsamic vinegar', 'Peach', 'Light Yellow Cheese', 'Red pepper', 'Bagel', 'Entrecote',", "in pheno.columns if c[:2] in ['s_','g_','f_','o_','c_','p_']] other_columns = [c for c in pheno.columns", "'dic and no_log are mutually exclusive' pheno[mb_columns] = 10**pheno[mb_columns] if 'all_non_bac' in args:", "['s_','g_','f_','o_','c_','p_']] other_columns = [c for c in pheno.columns if c[:2] not in ['s_','g_','f_','o_','c_','p_']]", "extract('keep_household','s','keep_related') print(phenoChip.shape) print(phenoChip.columns) # print \"Only in chip:\" # print set(phenoChip.index.values)-set(phenoAll.index.values) # print", "'Coffee', 'Pasta Salad', 'Cuba', 'Chicken Liver', 'Sweet Challah', 'Minced meat', 'Chocolate cake', 'Diet", "args: mb_columns = ['FID']+mb_columns ########################FFQ START##################### if 'questionnaires' in args: args=list(args)+ffq_args mb_columns_extra=[] if", "or Eggplant Freq','Peas, Green Beans or Okra Cooked Freq', 'Cauliflower or Broccoli Freq','Sweet", "args: mealsColumns=[val.replace(' ','_') for val in meals] #Correct by total calories 
pheno.loc[:,mealsColumns]=pheno[mealsColumns][pheno[mealsColumns]!=-9].div(pheno['Calories_kcal_Total'].values,axis=0) pheno.replace(np.nan,", "['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] if 'include_allPNP' not in args or ('PCs') in args: mb_columns += [c", "Freq', 'Fruit Pie or Cake Freq', 'Coated or Stuffed Cookies, Waffles or Biscuits", "Stuffed Cookies, Waffles or Biscuits Freq', 'Simple Cookies or Biscuits Freq', 'Ice Cream", "features_to_drop+=['PC1','PC2','PC3','PC4','PC5'] pheno=pheno.drop(features_to_drop,axis=1) if ('-9' not in args): pheno.replace(-9, np.nan, inplace=True) if 'permute' in", "'Salty Cookies', 'Beef', 'Meatballs', 'Hamburger sandwich', 'Chicken thighs', 'Granola', 'Beet', 'Couscous', 'Beet Salad',", "Cake', 'Sweet Roll', 'Danish', 'Coffee', 'Pasta Salad', 'Cuba', 'Chicken Liver', 'Sweet Challah', 'Minced", "'Wholemeal Roll', 'Canned corn', 'Salty Cheese', 'Melawach', 'White cake', 'Apple', 'Lettuce Salad', 'Cereals',", "Cheese', 'Melawach', 'White cake', 'Apple', 'Lettuce Salad', 'Cereals', 'Yellow Cheese', 'Tea', 'Beer', 'Mozzarella", "np.isnan(pheno.loc[participant,'Carbs_g']) or \\ # np.isnan(pheno.loc[participant,'Fat_g']) or \\ # np.isnan(pheno.loc[participant,'Protain_g']): # sum+=1 # print", "print \"Only in chip:\" # print set(phenoChip.index.values)-set(phenoAll.index.values) # print len(set(phenoChip.index)-set(phenoAll.index)) print(pheno.columns.values.tolist()) print(pheno.shape) sum=0", "'Wholemeal Light Bread', 'Marble Cake', 'Brown Rice', 'Cold cut', 'Gilthead Bream', 'Garlic', 'Grapes',", "or \\ # np.isnan(pheno.loc[participant,'Protain_g']): # sum+=1 # print participant # print pheno.loc[participant,['Calories_kcal','Carbs_g','Fat_g','Protain_g','Protain_g']] #", "Freq', 'Kiwi or Strawberries Freq', 'Mango Freq', 'Peach, Nectarine, Plum Freq', 'Pear Fresh,", "Tea', 'Schnitzel', 'Brown Sugar', 'Peanuts', 'Mayonnaise', 'Persimmon', 'Apple juice', 'Stuffed Peppers', 'Egg', 'Pear',", "if c[:2] 
in ['s_','g_','f_','o_','c_','p_']] other_columns = [c for c in pheno.columns if c[:2]", "'Vinaigrette', 'Bamba', 'Dark Chocolate', 'Turkey Shawarma', 'Olive oil', #u'Parmesan\\xc2\\xa0cheese', 'Guacamole', 'Coleslaw', 'Americano', 'Pesek", "not in args: features_to_drop +=['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] else: features_to_drop +=['Age','Gender'] if ('include_allPNP' not in args)", "Sugar Freq', 'Decaffeinated Coffee Freq', 'Coffee Freq', 'Herbal Tea Freq', 'Green Tea Freq',", "args: mb_columns += [c for c in pheno.columns if c[:2]=='f_' ] if 'o'", "'PNP_autosomal_clean2_nodfukim_norelated'), count_A1=True)#.read() df_fam_no_related = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated.fam'), delim_whitespace=True, index_col=0, header=None) df_fam = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim.fam'),", "Beef, Chicken Freq', 'Shish Kebab in Pita Bread Freq', 'Falafel in Pita version", "Soup', 'Tzfatit Cheese', 'Nectarine', 'Chicken legs', 'Nuts', 'Goat Cheese', 'Jam', 'Feta Cheese', 'Mandarin',", "'Wholemeal Noodles', 'Green Tea', 'Schnitzel', 'Brown Sugar', 'Peanuts', 'Mayonnaise', 'Persimmon', 'Apple juice', 'Stuffed", "'Wholemeal Bread', 'Tuna Salad', 'Chocolate spread', 'Kebab', 'Rice crackers', 'Wafers', 'Lettuce', 'Rice Noodles',", "#pheno_fn_bac =os.path.join(cleanDataPath,'allChipPhenotypes_nodfukimWith5PCair.phenotypes') pheno_fn_bacDic=os.path.join(cleanDataPath,'dicNoMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') pheno_fn_bacAllPNP=os.path.join(rawDataPath,'allPNPPhenotypes.phenotypes') iidsNoSharedEnv='/net/mraid08/export/genie/Microbiome/Analyses/PNPChip/cleanData/PNP_autosomal_clean2_nodfukim_NoCouples.txt' PNP_16S_DIR = '/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/16S' glycemicStatusPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/glycemic_status.csv' def extract(*args,**kwargs): known_args =", "1 %s -d ' ' | cut -f 
1 -d '_'\"%os.path.join(rawDataPath,'tmp','dfukim.txt')) pheno =pheno[~pheno.index.isin([int(dafook)", "args: mb_columns_extra += ['Egg Recipes Freq', 'Egg, Hard Boiled or Soft Freq', 'Schnitzel", "Cocktails Freq', 'Alcoholic Drinks Freq'] if 'fruits' in args: mb_columns_extra += ['Mandarin or", "in kwargs['taxa']: taxadf=pheno.filter(regex=(taxa)) mb_columns += taxadf.columns.values.tolist() if 'all_bac' in args: args=list(args)+['s','g','f','o','c','p'] if 's'", "pheno.merge(df_taxa, left_index=True, right_index=True) pheno_s = pheno[[c for c in pheno.columns if c[:2]=='s_']] pheno_g", "= pheno['Cholesterol,total'] - pheno['HDLCholesterol'] - 2*pheno['Triglycerides'] if 'genotek_only' in args: pheno = pheno.loc[pheno['IsGenotek']==1]", "'Shakshouka', 'Tahini', 'Chicken breast', 'Steak', 'Light Bread', 'Wholemeal Crackers', 'Sugar Free Gum', 'Hamburger',", "Noodles', 'Green Tea', 'Schnitzel', 'Brown Sugar', 'Peanuts', 'Mayonnaise', 'Persimmon', 'Apple juice', 'Stuffed Peppers',", "'Chicken soup', 'Wholemeal Roll', 'Canned corn', 'Salty Cheese', 'Melawach', 'White cake', 'Apple', 'Lettuce", "cheese', 'Bissli', 'Pullet', 'Hummus', 'Chinese Chicken Noodles', 'Shakshouka', 'Tahini', 'Chicken breast', 'Steak', 'Light", "'Canned Tuna or Tuna Salad Freq', 'Fish (not Tuna) Pickled, Dried, Smoked, Canned", "+= [arg] mb_columns_extra=[val.replace(' ','_') for val in mb_columns_extra] mb_columns+=mb_columns_extra if 'meals' in args:", "'Jam', 'Feta Cheese', 'Mandarin', 'Pesto', 'Sugar substitute', 'Cheesecake', 'Raisins', 'Chocolate', 'Quinoa', 'Cooked broccoli',", "Bread Freq', 'Wholemeal or Rye Bread Freq', 'Baguette Freq', 'Roll or Bageles Freq',", "beans', 'Cooked zucchini', 'Sweet potato', 'Wine', 'Cookies', 'Challah', 'Spelled', 'Honey', 'Green beans', 'Milk',", "Freq', 'Fresh Vegetable Salad Without Dressing or Oil Freq', 'Fresh Vegetable Salad With", "'Soymilk', 'Dates', 'Croissant', 'Biscuit', 'Potato chips', 'White Cheese', 'French fries', 'Wholemeal Bread', 
'Tuna", "pandas.read_csv(os.path.join(PNP_16S_DIR, taxa_level+'.txt'), sep='\\t', index_col=0) df_taxa[df_taxa<1e-3] = 1e-4 df_taxa = np.log10(df_taxa) pheno = pheno.merge(df_taxa,", "pheno if __name__==\"__main__\": # pheno=extract('dic','covars','keep_household',\"pastry\",ratio=0.2)#'all_bac' phenoAll = extract('s','include_allPNP','covars')#'include_allPNP','keep_household','ffq','keep_related')#'include_allPNP', print(phenoAll.shape) print(phenoAll.columns) phenoAll = extract('s','include_allPNP')", "('other' not in args): if 'calories' not in args: features_to_drop +=['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] else: features_to_drop", "print(phenoAll.columns) phenoAll = extract('s','include_allPNP') print(phenoAll.shape) print(phenoAll.columns) phenoChip = extract('keep_household','s','keep_related') print(phenoChip.shape) print(phenoChip.columns) # print", "'White or Brown Sugar Freq', 'Artificial Sweeteners Freq',] if 'drinks' in args: mb_columns_extra", "pheno: ### if (c[:2] not in ['c_', 'g_', 'o_', 's_', 'k_', 'p_', 'f_']):", "# mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', # 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', # 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', # 'RDW','Sodium','TSH','WBC','AlkalinePhosphatase','GGT','LDH','Iron','LDLCholesterol','Magnesium', # 'Triglycerides','TotalProtein','TotalBilirubin','Urea'] mb_columns", "Grapefruit Juice Freq', 'Apple Freq', 'Apricot Fresh or Dry, or Loquat Freq', 'Grapes", "c mb_columns=list(set(mb_columns)) pheno= pheno[mb_columns] if 'threshold' not in kwargs: threshold = -4 else:", "an addition for Salads or Stews Freq','Mayonnaise Including Light Freq', 'Thousand Island Dressing,", "or Flakes Freq'] if 'delivery' in args: mb_columns_extra += ['C-Section','Home delivery','Was breastfed'] if", "'taxa' in 
kwargs: if kwargs['taxa'][0]=='*': kwargs['taxa']=[initial+kwargs['taxa'][1:] for initial in ('s_', 'g_', 'f_', 'o_',", "Cooked Freq', 'Cauliflower or Broccoli Freq','Sweet Potato Freq', 'Brussels Sprouts, Green or Red", "inplace=True, drop=True) pheno_s = pheno_nodic[[c for c in pheno_nodic.columns if c[:2]=='s_']] pheno_g =", "Salad With Dressing or Oil Freq', 'Avocado Freq','Lemon Freq', 'Onion Freq', 'Garlic Freq',", "'Americano', 'Pesek Zman snack', 'Green onions', 'Mushrooms', 'Lemon juice', 'Canned Tuna Fish', 'Vegetable", "'questionnaires' in args: args=list(args)+ffq_args mb_columns_extra=[] if 'activity' in args: mb_columns_extra += ['Work activity','Physical", "as Salami Freq', 'Pastrami or Smoked Turkey Breast Freq', 'Turkey Meatballs, Beef, Chicken", "sep='\\t', index_col=0) df_taxa[df_taxa<1e-3] = 1e-4 df_taxa = np.log10(df_taxa) pheno = pheno.merge(df_taxa, left_index=True, right_index=True)", "if 'ratio' in kwargs: ratio=kwargs['ratio'] mb_columns = [c for c in pheno.columns if", "Yogurt', 'Bread', 'Onion', 'Cream Cheese', 'Chicken soup', 'Wholemeal Roll', 'Canned corn', 'Salty Cheese',", "'French fries', 'Wholemeal Bread', 'Tuna Salad', 'Chocolate spread', 'Kebab', 'Rice crackers', 'Wafers', 'Lettuce',", "# 'Triglycerides','TotalProtein','TotalBilirubin','Urea'] mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', 'RDW','Sodium','TSH','WBC','LDLCholesterol'] if 'glucose' in args: mb_columns", "or Dry, or Loquat Freq', 'Grapes or Raisins Freq', 'Banana Freq', 'Melon Freq',", "pheno.set_index('IID', inplace=True, drop=True) pheno_nodic =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno_nodic.set_index('IID', inplace=True, drop=True) pheno_s = pheno_nodic[[c for c", "= np.nan pheno.loc[pheno.Waist==-9, 'WHR'] 
= np.nan pheno['LDLCholesterol'] = pheno['Cholesterol,total'] - pheno['HDLCholesterol'] - 2*pheno['Triglycerides']", "args, '16s and dic are mutually exclusive' if ('taxa' in list(kwargs.keys())): assert len(set(['all_bac','s',", "if 'keep_missingCovars' not in args: #One participant 244624 has no 'Calories_kcal','Carbs_g','Fat_g','Protain_g' #3 participant", "in args: mb_columns += [c for c in pheno.columns if c[:2]=='f_' ] if", "or Tuna Salad Freq', 'Fish (not Tuna) Pickled, Dried, Smoked, Canned Freq'] if", "sum if np.isnan(pheno.loc[participant,'Age']) or np.isnan(pheno.loc[participant,'Gender']) : print(\"Participant %s, age %s, gender %s\" %(participant,pheno.loc[participant,'Age'],pheno.loc[participant,'Gender']))", "'Majadra', 'Oatmeal', 'Soy sauce', 'Strawberry', 'Pastrami', 'Lemonade', 'Pasta with tomato sauce', 'Chicken']#removed: u'Soda", "+=['median_Without_BMI_ALT_Overall','WakeupGlucose','BMI','VegeterianScale'] pheno.loc[pheno['VegeterianScale']<0, 'VegeterianScale']=np.nan if 'drugs' in args: mb_columns+=drug_args else: for arg in drug_args:", "in args, 'IsGenotek and covars are mutually exclusive' if 'otu' in args: assert", "kwargs['taxa']: taxadf=pheno.filter(regex=(taxa)) mb_columns += taxadf.columns.values.tolist() if 'all_bac' in args: args=list(args)+['s','g','f','o','c','p'] if 's' in", "Boiled, Baked, Mashed, Potatoes Salad Freq', 'Fries Freq', 'Pasta or Flakes Freq'] if", "'Rice crackers', 'Wafers', 'Lettuce', 'Rice Noodles', 'Lentils', 'Mutton', 'Wholemeal Noodles', 'Green Tea', 'Schnitzel',", "or Turkey Without Skin Freq', 'Sausages Freq', 'Sausages such as Salami Freq', 'Pastrami", "Soup Freq', 'Red Pepper Freq', 'Green Pepper Freq', 'Cucumber Freq', 'Zucchini or Eggplant", "= 10**pheno[mb_columns] if 'all_non_bac' in args: args=list(args)+['covars','blood','glucose','ffq','antropo'] mb_columns += ['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] if 'include_allPNP' not", "'Kebab', 'Rice crackers', 'Wafers', 
'Lettuce', 'Rice Noodles', 'Lentils', 'Mutton', 'Wholemeal Noodles', 'Green Tea',", "not in args: if '16s' in args: sterile_individuals = alpha_diversity_g[alpha_diversity_g < 4].index else:", "in args: #AddingIrisGlucose df_glucose = pandas.read_csv(glycemicStatusPath).set_index('RegNum') pheno = df_glucose.merge(pheno, left_index=True, right_index=True,how='right') mb_columns +=['median_Without_BMI_ALT_Overall','WakeupGlucose','BMI','VegeterianScale']", "Oatmeal Porridge Freq', 'Rice Freq','Couscous, Burgul, Mamaliga, Groats Freq', 'Potatoes Boiled, Baked, Mashed,", "+=['Age','Gender'] if ('include_allPNP' not in args) and ('PCs' not in args): features_to_drop+=['PC1','PC2','PC3','PC4','PC5'] pheno=pheno.drop(features_to_drop,axis=1)", "or Raisins Freq', 'Banana Freq', 'Melon Freq', 'Kiwi or Strawberries Freq', 'Mango Freq',", "Tea Freq', 'Regular Tea Freq', 'Beer Freq', 'Sweet Dry Wine, Cocktails Freq', 'Alcoholic", "Pita Bread Freq', 'Cooked Legumes Freq', 'Processed Meat Free Products Freq'] if 'meatProducts'", "Freq', 'Fried Fish Freq', 'Canned Tuna or Tuna Salad Freq', 'Fish (not Tuna)", "if c[:2]=='c_' ] if 'p' in args: mb_columns += [c for c in", "+= ['Egg Recipes Freq', 'Egg, Hard Boiled or Soft Freq', 'Schnitzel Turkey or", "- pheno['HDLCholesterol'] - 2*pheno['Triglycerides'] if 'genotek_only' in args: pheno = pheno.loc[pheno['IsGenotek']==1] if 'swab_only'", "] if 'c' in args: mb_columns += [c for c in pheno.columns if", "'Canned corn', 'Salty Cheese', 'Melawach', 'White cake', 'Apple', 'Lettuce Salad', 'Cereals', 'Yellow Cheese',", "df_fam_no_related = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated.fam'), delim_whitespace=True, index_col=0, header=None) df_fam = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim.fam'), delim_whitespace=True, index_col=0,", "args: assert '16s' in args if 'dic' in args: pheno =pandas.read_csv(pheno_fn_bacDic,sep='\\t') pheno.set_index('IID', inplace=True,", 
"= [c for c in pheno.columns if c[:2] in ['s_','g_','f_','o_','c_','p_']] other_columns = [c", "couscous', 'Pistachio', 'Date honey', 'Vinaigrette', 'Bamba', 'Dark Chocolate', 'Turkey Shawarma', 'Olive oil', #u'Parmesan\\xc2\\xa0cheese',", "'Kohlrabi', 'Eggplant Salad', 'Cooked green beans', 'Cooked mushrooms', 'Watermelon', 'Grilled cheese', 'Bissli', 'Pullet',", "Freq', 'Potatoes Boiled, Baked, Mashed, Potatoes Salad Freq', 'Fries Freq', 'Pasta or Flakes", "'Danish', 'Coffee', 'Pasta Salad', 'Cuba', 'Chicken Liver', 'Sweet Challah', 'Minced meat', 'Chocolate cake',", "'Lentil Soup', 'Tzfatit Cheese', 'Nectarine', 'Chicken legs', 'Nuts', 'Goat Cheese', 'Jam', 'Feta Cheese',", "pheno.columns if c[:2] in ['s_','g_','f_','o_','c_','p_']] other_columns = [c for c in pheno.columns if", "pregnant','Is breastfeeding','Is after birth', 'Taking contraceptives', 'Regular period', 'Irregular period', 'No period','Hormonal replacment',", "'Low fat Milk', 'Pickled cucumber', 'Soymilk', 'Dates', 'Croissant', 'Biscuit', 'Potato chips', 'White Cheese',", "#bed = Bed(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated'), count_A1=True)#.read() df_fam_no_related = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated.fam'), delim_whitespace=True, index_col=0, header=None) df_fam", "in Pita version 2 Freq','Processed Meat Products Freq','Beef, Veal, Lamb, Pork, Steak, Golash", "mealsColumns=[val.replace(' ','_') for val in meals] #Correct by total calories pheno.loc[:,mealsColumns]=pheno[mealsColumns][pheno[mealsColumns]!=-9].div(pheno['Calories_kcal_Total'].values,axis=0) pheno.replace(np.nan, 0,inplace=True)", "Freq', 'Fries Freq', 'Pasta or Flakes Freq'] if 'delivery' in args: mb_columns_extra +=", "'Tuna Salad', 'Chocolate spread', 'Kebab', 'Rice crackers', 'Wafers', 'Lettuce', 'Rice Noodles', 'Lentils', 'Mutton',", "Freq', 'Pickled Vegetables Freq', 'Olives Freq'] if 'womenOnlyQuestions' in args: mb_columns_extra += ['Is", "if 'g' 
in args: mb_columns += [c for c in pheno.columns if c[:2]=='g_'", "+= ['Alcoholic_Drinks_Freq','Cigarretes_per_day','Coffee_Freq','Start_smoking_age'] if 'antropo' in args: mb_columns += ['BMI','BPdia','BPsys','HeartRate','Height','Hips','WHR','Waist'] if 's_stats_pheno' in args:", "'Light Bread Freq', 'Wholemeal or Rye Bread Freq', 'Baguette Freq', 'Roll or Bageles", "assert 'covars' not in args, 'IsGenotek and covars are mutually exclusive' if 'otu'", "him\" features_to_drop=[] if ('IsGenotek' not in args) and ('covars' not in args) and", "pheno_fn_bac =os.path.join(cleanDataPath,'noMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') #pheno_fn_bac =os.path.join(cleanDataPath,'allChipPhenotypes_nodfukimWith5PCair.phenotypes') pheno_fn_bacDic=os.path.join(cleanDataPath,'dicNoMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') pheno_fn_bacAllPNP=os.path.join(rawDataPath,'allPNPPhenotypes.phenotypes') iidsNoSharedEnv='/net/mraid08/export/genie/Microbiome/Analyses/PNPChip/cleanData/PNP_autosomal_clean2_nodfukim_NoCouples.txt' PNP_16S_DIR = '/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/16S' glycemicStatusPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/glycemic_status.csv' def extract(*args,**kwargs):", "+= ['Mandarin or Clementine Freq', 'Orange or Grapefruit Freq', 'Orange or Grapefruit Juice", "import numpy as np import sys from pysnptools.snpreader.bed import Bed import subprocess cleanDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/cleanData/'", "Freq', 'Chicken or Turkey Without Skin Freq', 'Sausages Freq', 'Sausages such as Salami", "'fid', 'keep_household', 'no_log', 'keep_related', 'keep_sterile', '16s', '-9', 'covars_noPCs', 'PCs', 'lactose','include_allPNP','IsGenotek','permute','meals','other','drugs', 'calories','bloodType','questionnaires','keep_missingCovars','activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 
'legumes','meatProducts','pastry','qualityOfLiving',", "Yellow Cheese', 'Red pepper', 'Bagel', 'Entrecote', 'Cottage cheese', 'Oil', 'Natural Yogurt', 'Walnuts', 'Edamame',", "Dressing Freq', 'Honey, Jam, fruit syrup, Maple syrup Freq', 'White or Brown Sugar", "Free Products Freq'] if 'meatProducts' in args: mb_columns_extra += ['Egg Recipes Freq', 'Egg,", "'Yeast Cakes and Cookies as Rogallach, Croissant or Donut Freq', 'Cake, Torte Cakes,", "np.isnan(pheno.loc[participant,'Age']) or np.isnan(pheno.loc[participant,'Gender']) : print(\"Participant %s, age %s, gender %s\" %(participant,pheno.loc[participant,'Age'],pheno.loc[participant,'Gender'])) # print", "'Rice', 'Diet Fruit Drink', 'Corn schnitzel', 'Cappuccino', 'Low fat Milk', 'Pickled cucumber', 'Soymilk',", "pheno.columns if c[:2]=='g_']] ### for c in pheno: ### if (c[:2] not in", "in args: mb_columns += [c for c in pheno.columns if c[:2]=='g_' ] if", "Salami Freq', 'Pastrami or Smoked Turkey Breast Freq', 'Turkey Meatballs, Beef, Chicken Freq',", "1e-4 df_taxa = np.log10(df_taxa) pheno = pheno.merge(df_taxa, left_index=True, right_index=True) pheno_s = pheno[[c for", "+= ['Tomato Freq','Cooked Tomatoes, Tomato Sauce, Tomato Soup Freq', 'Red Pepper Freq', 'Green", "in args: assert 'covars' not in args, 'IsGenotek and covars are mutually exclusive'", "Salad Freq', 'Fries Freq', 'Pasta or Flakes Freq'] if 'delivery' in args: mb_columns_extra", "and Cookies as Rogallach, Croissant or Donut Freq', 'Cake, Torte Cakes, Chocolate Cake", "args: mb_columns_extra += ['Nectar, Cider Freq', 'Diet Juice Freq', 'Juice Freq', 'Diet Soda", "'Cheesecake', 'Raisins', 'Chocolate', 'Quinoa', 'Cooked broccoli', 'Beef Cholent', 'Cracker', 'Chocolate Cookies', 'White beans',", "kwargs: threshold = -4 else: threshold=kwargs['threshold'] if 'ratio' in kwargs: ratio=kwargs['ratio'] mb_columns =", "pandas import os import numpy as np import sys from pysnptools.snpreader.bed import Bed", "if 'antropo' in args: mb_columns += 
['BMI','BPdia','BPsys','HeartRate','Height','Hips','WHR','Waist'] if 's_stats_pheno' in args: s_stats=['BMI','Cholesterol,total','WakeupGlucose','Albumin','Creatinine','HbA1C%','Height','Hips','Waist','WHR','HDLCholesterol'] #,", "args: mb_columns += [c for c in pheno.columns if c[:2]=='g_' ] if 'f'", "in pheno.columns if c[:2]=='s_']] pheno_g = pheno[[c for c in pheno.columns if c[:2]=='g_']]", "Plum Freq', 'Pear Fresh, Cooked or Canned Freq','Persimmon Freq', 'Watermelon Freq', 'Dried Fruits", "Challah', 'Minced meat', 'Chocolate cake', 'Diet Coke', 'Dried dates', 'Carrot Cake', 'Doritos', 'Israeli", "White Grains, Watermelon Seeds Freq', 'Nuts, almonds, pistachios Freq','Peanuts Freq'] if 'vegetables' in", "Freq', 'Juice Freq', 'Diet Soda Freq', 'Regular Sodas with Sugar Freq', 'Decaffeinated Coffee", "= (pheno_s>pheno_s.min().min()).sum(axis=1) alpha_diversity_g = (pheno_g>pheno_g.min().min()).sum(axis=1) pheno.loc[pheno.Hips==-9, 'WHR'] = np.nan pheno.loc[pheno.Waist==-9, 'WHR'] = np.nan", "Tea Freq', 'Beer Freq', 'Sweet Dry Wine, Cocktails Freq', 'Alcoholic Drinks Freq'] if", "or Grilled Freq', 'Fried Fish Freq', 'Canned Tuna or Tuna Salad Freq', 'Fish", "'Pancake', 'Pasta', 'Sauteed vegetables', 'Plum', 'Goat Milk Yogurt', 'Orange juice', 'Potatoes', 'Halva', 'Yellow", "'g' in args: mb_columns += [c for c in pheno.columns if c[:2]=='g_' ]", "'Ice Cream or Popsicle which contains Dairy Freq', 'Popsicle Without Dairy Freq', 'Black", "presence=(pheno[mb_columns]>threshold +1e-5).astype(int).sum() presence=presence[presence > len(presence)*ratio].index.values.tolist() pheno=pheno[other_columns+presence] if ('keep_related' not in args): #bed =", "or Turkey Dishes Freq', 'Beef or Chicken Soup Freq', 'Internal Organs Freq', 'Fish", "Cilantro, Green Onion Freq', 'Fresh Vegetable Salad Without Dressing or Oil Freq', 'Fresh", "print(phenoAll.shape) print(phenoAll.columns) phenoChip = extract('keep_household','s','keep_related') print(phenoChip.shape) 
print(phenoChip.columns) # print \"Only in chip:\" #", "'all_non_bac' in args: args=list(args)+['covars','blood','glucose','ffq','antropo'] mb_columns += ['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] if 'include_allPNP' not in args or", "Freq', 'Salty Snacks Freq', 'Cheese Cakes or Cream Cakes Freq', 'Yeast Cakes and", "in drug_args: if arg in args: mb_columns += [arg] mb_columns_extra=[val.replace(' ','_') for val", "mb_columns = [] if 'taxa' in kwargs: if kwargs['taxa'][0]=='*': kwargs['taxa']=[initial+kwargs['taxa'][1:] for initial in", "not in args, 'IsGenotek and covars are mutually exclusive' if 'otu' in args:", "'Pickled Vegetables Freq', 'Olives Freq'] if 'womenOnlyQuestions' in args: mb_columns_extra += ['Is pregnant','Is", "if 'IsGenotek' in args: assert 'covars' not in args, 'IsGenotek and covars are", "Turkey Without Skin Freq', 'Sausages Freq', 'Sausages such as Salami Freq', 'Pastrami or", "'Past breastfeeding'] if 'other' in args: #AddingIrisGlucose df_glucose = pandas.read_csv(glycemicStatusPath).set_index('RegNum') pheno = df_glucose.merge(pheno,", "'Pasta Salad', 'Cuba', 'Chicken Liver', 'Sweet Challah', 'Minced meat', 'Chocolate cake', 'Diet Coke',", "iidsNoSharedEnv='/net/mraid08/export/genie/Microbiome/Analyses/PNPChip/cleanData/PNP_autosomal_clean2_nodfukim_NoCouples.txt' PNP_16S_DIR = '/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/16S' glycemicStatusPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/glycemic_status.csv' def extract(*args,**kwargs): known_args = ['dic', 'all_bac', 's', 'g','f','o','c','p','otu',", "known_kwargs, 'unkown kwarg: %s'%(kwarg) if ('16s' in args): assert 'dic' not in args,", "subprocess.getstatusoutput(\"cut -f 1 %s -d ' ' | cut -f 1 -d '_'\"%os.path.join(rawDataPath,'tmp','dfukim.txt'))", "index_col=0) df_taxa[df_taxa<1e-3] = 1e-4 df_taxa = np.log10(df_taxa) pheno = pheno.merge(df_taxa, left_index=True, right_index=True) pheno_s", "'include_allPNP' not in args or 
('PCs') in args: mb_columns += [c for c", "c[:2]=='c_' ] if 'p' in args: mb_columns += [c for c in pheno.columns", "eggplant', 'Salmon', 'Cashew', 'Jewish donut', 'Rugelach', 'Cake', 'Ravioli', 'Tomatoes', 'Wholemeal Light Bread', 'Marble", "cake', 'Diet Coke', 'Dried dates', 'Carrot Cake', 'Doritos', 'Israeli couscous', 'Pistachio', 'Date honey',", "86356,762339,805175 have no 'Age','Gender' # if set(['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'])<=set(pheno.columns.values): keep_inds=pheno.loc[:,['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek']].replace(-9, np.nan).dropna().index.values beforeNumParticpants=pheno.shape[0] pheno=pheno.loc[keep_inds] afterNumParticpants=pheno.shape[0] if", "onions', 'Ice cream', 'Cream Cake', 'Green cabbage', 'Olives', 'Balsamic vinegar', 'Peach', 'Light Yellow", "if 'other' in args: #AddingIrisGlucose df_glucose = pandas.read_csv(glycemicStatusPath).set_index('RegNum') pheno = df_glucose.merge(pheno, left_index=True, right_index=True,how='right')", "Salad', 'Tilapia', 'Pizza', 'Fried cauliflower', 'Roasted eggplant', 'Baguette', 'Lentil Soup', 'Tzfatit Cheese', 'Nectarine',", "Freq', 'Mango Freq', 'Peach, Nectarine, Plum Freq', 'Pear Fresh, Cooked or Canned Freq','Persimmon", "keep_inds=pheno.loc[:,['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek']].replace(-9, np.nan).dropna().index.values beforeNumParticpants=pheno.shape[0] pheno=pheno.loc[keep_inds] afterNumParticpants=pheno.shape[0] if beforeNumParticpants-afterNumParticpants>0: pass #print \"Removing participants with missing", "args: mb_columns += ['BMI','BPdia','BPsys','HeartRate','Height','Hips','WHR','Waist'] if 's_stats_pheno' in args: s_stats=['BMI','Cholesterol,total','WakeupGlucose','Albumin','Creatinine','HbA1C%','Height','Hips','Waist','WHR','HDLCholesterol'] #, 'Triglycerides', 'LDLCholesterol'] mb_columns+=s_stats", 
"glycemicStatusPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/glycemic_status.csv' def extract(*args,**kwargs): known_args = ['dic', 'all_bac', 's', 'g','f','o','c','p','otu', 'all_non_bac', 'covars', 'blood', 'glucose',", "Beans or Okra Cooked Freq', 'Cauliflower or Broccoli Freq','Sweet Potato Freq', 'Brussels Sprouts,", "cauliflower', 'Roasted eggplant', 'Baguette', 'Lentil Soup', 'Tzfatit Cheese', 'Nectarine', 'Chicken legs', 'Nuts', 'Goat", "no FFQ!!!!! that is why we remove him\" features_to_drop=[] if ('IsGenotek' not in", "'WHR'] = np.nan pheno.loc[pheno.Waist==-9, 'WHR'] = np.nan pheno['LDLCholesterol'] = pheno['Cholesterol,total'] - pheno['HDLCholesterol'] -", "= pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated.fam'), delim_whitespace=True, index_col=0, header=None) df_fam = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim.fam'), delim_whitespace=True, index_col=0, header=None)", "#print \"Removing participants with missing covars!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\" #print beforeNumParticpants-afterNumParticpants #print \"805175 has no FFQ!!!!!", "'Fruit Salad Freq'] if 'hunger' in args: mb_columns_extra += ['General Hunger','Morning Hunger', 'Midday", "mb_columns += ['Alcoholic_Drinks_Freq','Cigarretes_per_day','Coffee_Freq','Start_smoking_age'] if 'antropo' in args: mb_columns += ['BMI','BPdia','BPsys','HeartRate','Height','Hips','WHR','Waist'] if 's_stats_pheno' in", "'genus', 'family', 'order', 'class', 'phylum']: df_taxa = pandas.read_csv(os.path.join(PNP_16S_DIR, taxa_level+'.txt'), sep='\\t', index_col=0) df_taxa[df_taxa<1e-3] =", "'Garlic Freq', 'Vegetable Soup Freq', 'Hummus Salad Freq', 'Tahini Salad Freq', 'Cooked Vegetable", "[c for c in pheno.columns if c[:4]=='OTU_' ] if 'no_log' in args: assert", "calories pheno.loc[:,mealsColumns]=pheno[mealsColumns][pheno[mealsColumns]!=-9].div(pheno['Calories_kcal_Total'].values,axis=0) pheno.replace(np.nan, 0,inplace=True) 
mb_columns += mealsColumns ########################FFQ END##################### #for c in pheno:", "'covars', 'blood', 'glucose', 'ffq', 'antropo', 's_stats_pheno', 'fid', 'keep_household', 'no_log', 'keep_related', 'keep_sterile', '16s', '-9',", "in args: mb_columns_extra += ['General Hunger','Morning Hunger', 'Midday Hunger', 'Evening Hunger'] if 'legumes'", "'Cooked Sweet potato', 'Butter', 'Omelette', 'Coated Wafers', 'Boiled corn', 'Chicken drumstick', 'Pita', 'Pasta", "drug_args: if arg in args: mb_columns += [arg] mb_columns_extra=[val.replace(' ','_') for val in", "#Correct by total calories pheno.loc[:,mealsColumns]=pheno[mealsColumns][pheno[mealsColumns]!=-9].div(pheno['Calories_kcal_Total'].values,axis=0) pheno.replace(np.nan, 0,inplace=True) mb_columns += mealsColumns ########################FFQ END##################### #for", "'Olives', 'Balsamic vinegar', 'Peach', 'Light Yellow Cheese', 'Red pepper', 'Bagel', 'Entrecote', 'Cottage cheese',", "period', 'No period','Hormonal replacment', 'Past breastfeeding'] if 'other' in args: #AddingIrisGlucose df_glucose =", "Sugar', 'Peanuts', 'Mayonnaise', 'Persimmon', 'Apple juice', 'Stuffed Peppers', 'Egg', 'Pear', 'Peas', 'Pecan', 'Cooked", "len(set(['all_bac','s', 'g','f','o','c','p','otu']).intersection(set(args)))==0, \\ 'taxa is mutual exclusive with all_bac,s,g,f,o,c,p,otu' if 'include_allPNP' in args:", "Island Dressing, Garlic Dressing Freq', 'Honey, Jam, fruit syrup, Maple syrup Freq', 'White", "+= ['General Hunger','Morning Hunger', 'Midday Hunger', 'Evening Hunger'] if 'legumes' in args: mb_columns_extra", "alpha_diversity_g[alpha_diversity_g < 4].index else: sterile_individuals = alpha_diversity_s[alpha_diversity_s < 15].index pheno=pheno[~pheno.index.isin(sterile_individuals)] if 'keep_household' not", "in args: features_to_drop +=['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] else: features_to_drop +=['Age','Gender'] if ('include_allPNP' not in args) and", "'Green 
beans', 'Milk', 'Peanut Butter', 'Cooked carrots', 'Lemon', 'Salty Cookies', 'Beef', 'Meatballs', 'Hamburger", "'Beef Cholent', 'Cracker', 'Chocolate Cookies', 'White beans', 'Cooked zucchini', 'Sweet potato', 'Wine', 'Cookies',", "'Popsicle Without Dairy Freq', 'Black or White Grains, Watermelon Seeds Freq', 'Nuts, almonds,", "'Diet Fruit Drink', 'Corn schnitzel', 'Cappuccino', 'Low fat Milk', 'Pickled cucumber', 'Soymilk', 'Dates',", "len(presence)*ratio].index.values.tolist() pheno=pheno[other_columns+presence] if ('keep_related' not in args): #bed = Bed(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated'), count_A1=True)#.read() df_fam_no_related", "pass #print \"Removing participants with missing covars!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\" #print beforeNumParticpants-afterNumParticpants #print \"805175 has no", "if 'dic' in args: pheno =pandas.read_csv(pheno_fn_bacDic,sep='\\t') pheno.set_index('IID', inplace=True, drop=True) pheno_nodic =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno_nodic.set_index('IID', inplace=True,", "in args): pheno = pheno[[c for c in pheno if c[:2] not in", "inplace=True) if 'permute' in args: pheno=pandas.DataFrame(pheno.values[np.random.permutation(pheno.shape[0])],index=pheno.index,columns=pheno.columns) return pheno if __name__==\"__main__\": # pheno=extract('dic','covars','keep_household',\"pastry\",ratio=0.2)#'all_bac' phenoAll", "period','Hormonal replacment', 'Past breastfeeding'] if 'other' in args: #AddingIrisGlucose df_glucose = pandas.read_csv(glycemicStatusPath).set_index('RegNum') pheno", "args: pheno =pandas.read_csv(pheno_fn_bacAllPNP,sep='\\t') else: pheno =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno.set_index('IID', inplace=True, drop=True) if 'include_allPNP'in args: status,", "list(kwargs.keys()): assert kwarg in known_kwargs, 'unkown kwarg: %s'%(kwarg) if ('16s' in args): assert", "in Pita Bread Freq', 'Cooked Legumes Freq', 'Processed Meat Free Products Freq'] if", "potato', 'Butter', 
'Omelette', 'Coated Wafers', 'Boiled corn', 'Chicken drumstick', 'Pita', 'Pasta Bolognese', 'Chicken", "'Pasta Bolognese', 'Chicken Meatballs', 'Burekas', 'Carrots', 'Tofu', 'Wholemeal Pita', 'Sunflower seeds', 'Coriander', 'Ciabatta',", "= df_household[[c for c in df_household.columns if int(c) in pheno.index]] df_household = df_household[df_household.index.isin(pheno.index)]", "Freq','Processed Meat Products Freq','Beef, Veal, Lamb, Pork, Steak, Golash Freq', 'Mixed Meat Dishes", "that is why we remove him\" features_to_drop=[] if ('IsGenotek' not in args) and", "not in args) and ('covars' not in args) and ('covars_noPCs' not in args):", "not in args) and ('covars_noPCs' not in args) and ('other' not in args):", "not support dicotomize bacteria' if 'IsGenotek' in args: assert 'covars' not in args,", "c in pheno.columns if c[:2]=='o_' ] if 'c' in args: mb_columns += [c", "for c in pheno.columns if c[:2]=='c_' ] if 'p' in args: mb_columns +=", "Drink', 'Corn schnitzel', 'Cappuccino', 'Low fat Milk', 'Pickled cucumber', 'Soymilk', 'Dates', 'Croissant', 'Biscuit',", "Soup Freq', 'Hummus Salad Freq', 'Tahini Salad Freq', 'Cooked Vegetable Salads Freq', 'Pickled", "# if np.isnan(pheno.loc[participant,'Calories_kcal']) or \\ # np.isnan(pheno.loc[participant,'Carbs_g']) or \\ # np.isnan(pheno.loc[participant,'Fat_g']) or \\", "'Goat Cheese', 'Jam', 'Feta Cheese', 'Mandarin', 'Pesto', 'Sugar substitute', 'Cheesecake', 'Raisins', 'Chocolate', 'Quinoa',", "| cut -f 1 -d '_'\"%os.path.join(rawDataPath,'tmp','dfukim.txt')) pheno =pheno[~pheno.index.isin([int(dafook) for dafook in output.split('\\n')])] if", "in args: mb_columns += ['95P_Glucose','Glucose_Noise','Max_Glucose','Median_Glucose','WakeupGlucose', 'MeanGlucoseResponse','MeanBreadResponse','MeanBreadButterResponse'] if 'ffq' in args: mb_columns += ['Alcoholic_Drinks_Freq','Cigarretes_per_day','Coffee_Freq','Start_smoking_age']", "'Onion Freq', 'Garlic Freq', 'Vegetable Soup Freq', 'Hummus Salad Freq', 'Tahini Salad 
Freq',", "args: assert 'dic' not in args, 'include_allPNP does not support dicotomize bacteria' if", "oil', #u'Parmesan\\xc2\\xa0cheese', 'Guacamole', 'Coleslaw', 'Americano', 'Pesek Zman snack', 'Green onions', 'Mushrooms', 'Lemon juice',", "df_taxa[df_taxa<1e-3] = 1e-4 df_taxa = np.log10(df_taxa) pheno = pheno.merge(df_taxa, left_index=True, right_index=True) pheno_s =", "in args, '16s and dic are mutually exclusive' if ('taxa' in list(kwargs.keys())): assert", "def extract(*args,**kwargs): known_args = ['dic', 'all_bac', 's', 'g','f','o','c','p','otu', 'all_non_bac', 'covars', 'blood', 'glucose', 'ffq',", "Strawberries Freq', 'Mango Freq', 'Peach, Nectarine, Plum Freq', 'Pear Fresh, Cooked or Canned", "['Egg Recipes Freq', 'Egg, Hard Boiled or Soft Freq', 'Schnitzel Turkey or Chicken", "beets', 'Almonds', 'Falafel', 'Noodles', 'Jachnun', 'Turkey', 'Sushi', 'Brazil nuts', 'Orange', 'Rice', 'Diet Fruit", "'Vegetable Salad', 'Fried eggplant', 'Salmon', 'Cashew', 'Jewish donut', 'Rugelach', 'Cake', 'Ravioli', 'Tomatoes', 'Wholemeal", "pheno[[c for c in pheno.columns if c[:2]=='g_']] ### for c in pheno: ###", "'Melawach', 'White cake', 'Apple', 'Lettuce Salad', 'Cereals', 'Yellow Cheese', 'Tea', 'Beer', 'Mozzarella Cheese',", "','_') for val in meals] #Correct by total calories pheno.loc[:,mealsColumns]=pheno[mealsColumns][pheno[mealsColumns]!=-9].div(pheno['Calories_kcal_Total'].values,axis=0) pheno.replace(np.nan, 0,inplace=True) mb_columns", "'Mushrooms', 'Lemon juice', 'Canned Tuna Fish', 'Vegetable Salad', 'Fried eggplant', 'Salmon', 'Cashew', 'Jewish", "'Beer Freq', 'Sweet Dry Wine, Cocktails Freq', 'Alcoholic Drinks Freq'] if 'fruits' in", "Croissant or Donut Freq', 'Cake, Torte Cakes, Chocolate Cake Freq', 'Fruit Pie or", "not in args) and ('covars_noPCs' not in args): features_to_drop += ['IsGenotek'] if ('covars'", "# np.isnan(pheno.loc[participant,'Protain_g']): # sum+=1 # print participant # print 
pheno.loc[participant,['Calories_kcal','Carbs_g','Fat_g','Protain_g','Protain_g']] # print sum", "args: mb_columns += [c for c in pheno.columns if c[:2]=='PC'] if 'lactose' in", "['Ordinary Bread or Challah Freq', 'Light Bread Freq', 'Wholemeal or Rye Bread Freq',", "Liver', 'Sweet Challah', 'Minced meat', 'Chocolate cake', 'Diet Coke', 'Dried dates', 'Carrot Cake',", "'VegeterianScale']=np.nan if 'drugs' in args: mb_columns+=drug_args else: for arg in drug_args: if arg", "Crackers', 'Sugar Free Gum', 'Hamburger', 'Dark Beer', 'Cooked beets', 'Almonds', 'Falafel', 'Noodles', 'Jachnun',", "nuts', 'Orange', 'Rice', 'Diet Fruit Drink', 'Corn schnitzel', 'Cappuccino', 'Low fat Milk', 'Pickled", "pheno=pheno.drop(features_to_drop,axis=1) if ('-9' not in args): pheno.replace(-9, np.nan, inplace=True) if 'permute' in args:", "'Pasta', 'Sauteed vegetables', 'Plum', 'Goat Milk Yogurt', 'Orange juice', 'Potatoes', 'Halva', 'Yellow pepper',", "'Hamburger sandwich', 'Chicken thighs', 'Granola', 'Beet', 'Couscous', 'Beet Salad', 'Chocolate Mousse Cake', 'Sweet", "#pheno=pheno[pheno.index.isin(noSharedEnvIID[0].astype(int).values)] #new code that decides which individuals to remove on the fly import", "'Coated Wafers', 'Boiled corn', 'Chicken drumstick', 'Pita', 'Pasta Bolognese', 'Chicken Meatballs', 'Burekas', 'Carrots',", "in pheno.columns if c[:2]=='c_' ] if 'p' in args: mb_columns += [c for", "'Schnitzel Turkey or Chicken Freq', 'Chicken or Turkey With Skin Freq', 'Chicken or", "Watermelon Seeds Freq', 'Nuts, almonds, pistachios Freq','Peanuts Freq'] if 'vegetables' in args: mb_columns_extra", "['IsGenotek'] if ('covars' not in args) and ('covars_noPCs' not in args) and ('other'", "val in meals] #Correct by total calories pheno.loc[:,mealsColumns]=pheno[mealsColumns][pheno[mealsColumns]!=-9].div(pheno['Calories_kcal_Total'].values,axis=0) pheno.replace(np.nan, 0,inplace=True) mb_columns += mealsColumns", "'Chocolate Cookies', 'White beans', 'Cooked zucchini', 'Sweet 
potato', 'Wine', 'Cookies', 'Challah', 'Spelled', 'Honey',", "such as Oatmeal Porridge Freq', 'Rice Freq','Couscous, Burgul, Mamaliga, Groats Freq', 'Potatoes Boiled,", "args: mb_columns_extra += ['Oil as an addition for Salads or Stews Freq','Mayonnaise Including", "in pheno.columns if c[:2]=='g_']] ### for c in pheno: ### if (c[:2] not", "'no_log', 'keep_related', 'keep_sterile', '16s', '-9', 'covars_noPCs', 'PCs', 'lactose','include_allPNP','IsGenotek','permute','meals','other','drugs', 'calories','bloodType','questionnaires','keep_missingCovars','activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions', 'genotek_only',", "'Decaffeinated Coffee Freq', 'Coffee Freq', 'Herbal Tea Freq', 'Green Tea Freq', 'Regular Tea", "Fennel, Dill, Cilantro, Green Onion Freq', 'Fresh Vegetable Salad Without Dressing or Oil", "if 'activityTypesFreq' in args: mb_columns_extra += ['T1Activity kind','Type 1 activity - freq','T2Activity kind',", "in args: mb_columns_extra += ['T1Activity kind','Type 1 activity - freq','T2Activity kind', 'Type 2", "Chicken Freq', 'Chicken or Turkey With Skin Freq', 'Chicken or Turkey Without Skin", "if ('keep_sterile') not in args: if '16s' in args: sterile_individuals = alpha_diversity_g[alpha_diversity_g <", "('covars' not in args) and ('covars_noPCs' not in args) and ('other' not in", "\"805175 has no FFQ!!!!! 
that is why we remove him\" features_to_drop=[] if ('IsGenotek'", "in pheno.columns if c[:4]=='OTU_' ] if 'no_log' in args: assert 'dic' not in", "cream', 'Coconut milk', 'Pretzels', 'Kohlrabi', 'Eggplant Salad', 'Cooked green beans', 'Cooked mushrooms', 'Watermelon',", "in pheno_nodic.columns if c[:2]=='g_']] else: if 'include_allPNP' in args: pheno =pandas.read_csv(pheno_fn_bacAllPNP,sep='\\t') else: pheno", "'p' in args: mb_columns += [c for c in pheno.columns if c[:2]=='p_' ]", "if 'fruits' in args: mb_columns_extra += ['Mandarin or Clementine Freq', 'Orange or Grapefruit", "if 'drinks' in args: mb_columns_extra += ['Nectar, Cider Freq', 'Diet Juice Freq', 'Juice", "not in args): features_to_drop+=['PC1','PC2','PC3','PC4','PC5'] pheno=pheno.drop(features_to_drop,axis=1) if ('-9' not in args): pheno.replace(-9, np.nan, inplace=True)", "'Eggplant Salad', 'Cooked green beans', 'Cooked mushrooms', 'Watermelon', 'Grilled cheese', 'Bissli', 'Pullet', 'Hummus',", "- 2*pheno['Triglycerides'] if 'genotek_only' in args: pheno = pheno.loc[pheno['IsGenotek']==1] if 'swab_only' in args:", "+= ['BMI','BPdia','BPsys','HeartRate','Height','Hips','WHR','Waist'] if 's_stats_pheno' in args: s_stats=['BMI','Cholesterol,total','WakeupGlucose','Albumin','Creatinine','HbA1C%','Height','Hips','Waist','WHR','HDLCholesterol'] #, 'Triglycerides', 'LDLCholesterol'] mb_columns+=s_stats mb_columns=list(set(mb_columns)) if", "-d '_'\"%os.path.join(rawDataPath,'tmp','dfukim.txt')) pheno =pheno[~pheno.index.isin([int(dafook) for dafook in output.split('\\n')])] if ('16s' in args): pheno", "args: assert 'covars' not in args, 'IsGenotek and covars are mutually exclusive' if", "Bernflaks Freq','Cooked Cereal such as Oatmeal Porridge Freq', 'Rice Freq','Couscous, Burgul, Mamaliga, Groats", "'Cooked green beans', 'Cooked mushrooms', 'Watermelon', 'Grilled cheese', 'Bissli', 'Pullet', 'Hummus', 'Chinese Chicken", "as np import sys from pysnptools.snpreader.bed import Bed import subprocess 
cleanDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/cleanData/' rawDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/rawData' pheno_fn_bac", "or White Grains, Watermelon Seeds Freq', 'Nuts, almonds, pistachios Freq','Peanuts Freq'] if 'vegetables'", "Sodas with Sugar Freq', 'Decaffeinated Coffee Freq', 'Coffee Freq', 'Herbal Tea Freq', 'Green", "in args: mb_columns += ['Alcoholic_Drinks_Freq','Cigarretes_per_day','Coffee_Freq','Start_smoking_age'] if 'antropo' in args: mb_columns += ['BMI','BPdia','BPsys','HeartRate','Height','Hips','WHR','Waist'] if", "'Dark Beer', 'Cooked beets', 'Almonds', 'Falafel', 'Noodles', 'Jachnun', 'Turkey', 'Sushi', 'Brazil nuts', 'Orange',", "mins','Physical activity - freq'] if 'activityTypesFreq' in args: mb_columns_extra += ['T1Activity kind','Type 1", "c in pheno.columns if c[:4]=='OTU_' ] if 'no_log' in args: assert 'dic' not", "'c' in args: mb_columns += [c for c in pheno.columns if c[:2]=='c_' ]", "if set(['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'])<=set(pheno.columns.values): keep_inds=pheno.loc[:,['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek']].replace(-9, np.nan).dropna().index.values beforeNumParticpants=pheno.shape[0] pheno=pheno.loc[keep_inds] afterNumParticpants=pheno.shape[0] if beforeNumParticpants-afterNumParticpants>0: pass #print \"Removing participants", "pandas.read_csv(os.path.join(cleanDataPath, 'EnvironmentBlock.txt'), delim_whitespace=True) df_household = df_household[[c for c in df_household.columns if int(c) in", "'Mayonnaise', 'Persimmon', 'Apple juice', 'Stuffed Peppers', 'Egg', 'Pear', 'Peas', 'Pecan', 'Cooked cauliflower', 'Cooked", "] if 'p' in args: mb_columns += [c for c in pheno.columns if", "Meatballs, Beef, Chicken Freq', 'Shish Kebab in Pita Bread Freq', 'Falafel in Pita", "mb_columns_extra += ['Falafel in Pita Bread Freq', 'Cooked Legumes Freq', 'Processed Meat Free", "= pheno_nodic[[c for c in 
pheno_nodic.columns if c[:2]=='s_']] pheno_g = pheno_nodic[[c for c", "['Tomato Freq','Cooked Tomatoes, Tomato Sauce, Tomato Soup Freq', 'Red Pepper Freq', 'Green Pepper", "in args: pheno=pandas.DataFrame(pheno.values[np.random.permutation(pheno.shape[0])],index=pheno.index,columns=pheno.columns) return pheno if __name__==\"__main__\": # pheno=extract('dic','covars','keep_household',\"pastry\",ratio=0.2)#'all_bac' phenoAll = extract('s','include_allPNP','covars')#'include_allPNP','keep_household','ffq','keep_related')#'include_allPNP', print(phenoAll.shape)", "'Cuba', 'Chicken Liver', 'Sweet Challah', 'Minced meat', 'Chocolate cake', 'Diet Coke', 'Dried dates',", "not in args): #bed = Bed(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated'), count_A1=True)#.read() df_fam_no_related = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated.fam'), delim_whitespace=True,", "= pheno[[c for c in pheno if c[:2] not in ('s_', 'g_', 'f_',", "'class', 'phylum']: df_taxa = pandas.read_csv(os.path.join(PNP_16S_DIR, taxa_level+'.txt'), sep='\\t', index_col=0) df_taxa[df_taxa<1e-3] = 1e-4 df_taxa =", "args: mb_columns_extra += ['Tomato Freq','Cooked Tomatoes, Tomato Sauce, Tomato Soup Freq', 'Red Pepper", "no 'Calories_kcal','Carbs_g','Fat_g','Protain_g' #3 participant 86356,762339,805175 have no 'Age','Gender' # if set(['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'])<=set(pheno.columns.values): keep_inds=pheno.loc[:,['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek']].replace(-9, np.nan).dropna().index.values", "are mutually exclusive' pheno[mb_columns] = 10**pheno[mb_columns] if 'all_non_bac' in args: args=list(args)+['covars','blood','glucose','ffq','antropo'] mb_columns +=", "meat', 'Chocolate cake', 'Diet Coke', 'Dried dates', 'Carrot Cake', 'Doritos', 'Israeli couscous', 'Pistachio',", "Salad Freq', 'Cooked Vegetable Salads Freq', 'Pickled Vegetables Freq', 'Olives Freq'] if 
'womenOnlyQuestions'", "c[:2] in ['s_','g_','f_','o_','c_','p_']] other_columns = [c for c in pheno.columns if c[:2] not", "args) and ('covars' not in args) and ('covars_noPCs' not in args): features_to_drop +=", "'Irregular period', 'No period','Hormonal replacment', 'Past breastfeeding'] if 'other' in args: #AddingIrisGlucose df_glucose", "print participant # print pheno.loc[participant,['Calories_kcal','Carbs_g','Fat_g','Protain_g','Protain_g']] # print sum if np.isnan(pheno.loc[participant,'Age']) or np.isnan(pheno.loc[participant,'Gender']) :", "Bream', 'Garlic', 'Grapes', 'Chocolate Chip Cookies', 'Cucumber', 'Mung Bean', 'Ketchup', 'Sweet Yogurt', 'Bread',", "pheno_nodic.columns if c[:2]=='s_']] pheno_g = pheno_nodic[[c for c in pheno_nodic.columns if c[:2]=='g_']] else:", "'calories' not in args: features_to_drop +=['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] else: features_to_drop +=['Age','Gender'] if ('include_allPNP' not in", "pheno_g = pheno[[c for c in pheno.columns if c[:2]=='g_']] ### for c in", "pheno.replace(np.nan, 0,inplace=True) mb_columns += mealsColumns ########################FFQ END##################### #for c in pheno: print c", "('include_allPNP' not in args) and ('PCs' not in args): features_to_drop+=['PC1','PC2','PC3','PC4','PC5'] pheno=pheno.drop(features_to_drop,axis=1) if ('-9'", "pheno =pandas.read_csv(pheno_fn_bacDic,sep='\\t') pheno.set_index('IID', inplace=True, drop=True) pheno_nodic =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno_nodic.set_index('IID', inplace=True, drop=True) pheno_s = pheno_nodic[[c", "'Egg', 'Pear', 'Peas', 'Pecan', 'Cooked cauliflower', 'Cooked Sweet potato', 'Butter', 'Omelette', 'Coated Wafers',", "Jam, fruit syrup, Maple syrup Freq', 'White or Brown Sugar Freq', 'Artificial Sweeteners", "'D.pain', 'D.CVD', 'D.GI','D.Thyroid', 'D.NSAID','D.Contraception'] meals=['Vodka or Arak', 'Avocado', 'Parsley', 'Coated peanuts', 'Sugar', 'Smoked", "Salad', 'Cooked green beans', 'Cooked 
mushrooms', 'Watermelon', 'Grilled cheese', 'Bissli', 'Pullet', 'Hummus', 'Chinese", "Freq', 'Olives Freq'] if 'womenOnlyQuestions' in args: mb_columns_extra += ['Is pregnant','Is breastfeeding','Is after", "in args) and ('covars_noPCs' not in args) and ('other' not in args): if", "seeds', 'Coriander', 'Ciabatta', 'Tomato sauce', 'Heavy cream', 'Banana', 'Kif Kef', 'Mustard', 'Coke', 'Vegetable", "'k_', 'p_', 'f_']): print c alpha_diversity_s = (pheno_s>pheno_s.min().min()).sum(axis=1) alpha_diversity_g = (pheno_g>pheno_g.min().min()).sum(axis=1) pheno.loc[pheno.Hips==-9, 'WHR']", "assert 'dic' not in args, 'include_allPNP does not support dicotomize bacteria' if 'IsGenotek'", "2*pheno['Triglycerides'] if 'genotek_only' in args: pheno = pheno.loc[pheno['IsGenotek']==1] if 'swab_only' in args: pheno", "args): pheno.replace(-9, np.nan, inplace=True) if 'permute' in args: pheno=pandas.DataFrame(pheno.values[np.random.permutation(pheno.shape[0])],index=pheno.index,columns=pheno.columns) return pheno if __name__==\"__main__\":", "in args: mb_columns += [c for c in pheno.columns if c[:2]=='o_' ] if", "pheno[[c for c in pheno if c[:2] not in ('s_', 'g_', 'f_', 'o_',", "for c in pheno if c[:2] not in ('s_', 'g_', 'f_', 'o_', 'c_',", "for kwarg in list(kwargs.keys()): assert kwarg in known_kwargs, 'unkown kwarg: %s'%(kwarg) if ('16s'", "Freq', 'Sausages Freq', 'Sausages such as Salami Freq', 'Pastrami or Smoked Turkey Breast", "or ('PCs') in args: mb_columns += [c for c in pheno.columns if c[:2]=='PC']", "'ffq', 'antropo', 's_stats_pheno', 'fid', 'keep_household', 'no_log', 'keep_related', 'keep_sterile', '16s', '-9', 'covars_noPCs', 'PCs', 'lactose','include_allPNP','IsGenotek','permute','meals','other','drugs',", "'Chicken thighs', 'Granola', 'Beet', 'Couscous', 'Beet Salad', 'Chocolate Mousse Cake', 'Sweet Roll', 'Danish',", "taxadf=pheno.filter(regex=(taxa)) mb_columns += taxadf.columns.values.tolist() if 'all_bac' in args: args=list(args)+['s','g','f','o','c','p'] if 
's' in args:", "'bloodType' in args: mb_columns_extra += ['Blood A','Blood B','Blood RH-'] if 'cereals' in args:", "+= ['Nectar, Cider Freq', 'Diet Juice Freq', 'Juice Freq', 'Diet Soda Freq', 'Regular", "'Tahini Salad Freq', 'Cooked Vegetable Salads Freq', 'Pickled Vegetables Freq', 'Olives Freq'] if", "+= ['Milk or Dark Chocolate Freq', 'Salty Snacks Freq', 'Cheese Cakes or Cream", "<reponame>yochaiedlitz/T2DM_UKB_predictions import pandas import os import numpy as np import sys from pysnptools.snpreader.bed", "'Cashew', 'Jewish donut', 'Rugelach', 'Cake', 'Ravioli', 'Tomatoes', 'Wholemeal Light Bread', 'Marble Cake', 'Brown", "in output.split('\\n')])] if ('16s' in args): pheno = pheno[[c for c in pheno", "schnitzel', 'Cappuccino', 'Low fat Milk', 'Pickled cucumber', 'Soymilk', 'Dates', 'Croissant', 'Biscuit', 'Potato chips',", "cream', 'Banana', 'Kif Kef', 'Mustard', 'Coke', 'Vegetable Soup', 'Sausages', 'Pancake', 'Pasta', 'Sauteed vegetables',", "pheno_s = pheno_nodic[[c for c in pheno_nodic.columns if c[:2]=='s_']] pheno_g = pheno_nodic[[c for", "Tuna) Pickled, Dried, Smoked, Canned Freq'] if 'pastry' in args: mb_columns_extra += ['Ordinary", "'Apple Freq', 'Apricot Fresh or Dry, or Loquat Freq', 'Grapes or Raisins Freq',", "Freq','Persimmon Freq', 'Watermelon Freq', 'Dried Fruits Freq', 'Fruit Salad Freq'] if 'hunger' in", "Bread Freq', 'Baguette Freq', 'Roll or Bageles Freq', 'Pita Freq', 'Saltine Crackers or", "'Fresh Vegetable Salad With Dressing or Oil Freq', 'Avocado Freq','Lemon Freq', 'Onion Freq',", "With Skin Freq', 'Chicken or Turkey Without Skin Freq', 'Sausages Freq', 'Sausages such", "header=None) df_related=df_fam[~df_fam.index.isin(df_fam_no_related.index)] pheno=pheno[(~pheno.index.isin(df_related.index))] if ('keep_sterile') not in args: if '16s' in args: sterile_individuals", "['Is pregnant','Is breastfeeding','Is after birth', 'Taking contraceptives', 'Regular period', 'Irregular period', 'No period','Hormonal", "import os import numpy as 
np import sys from pysnptools.snpreader.bed import Bed import", "['Milk or Dark Chocolate Freq', 'Salty Snacks Freq', 'Cheese Cakes or Cream Cakes", "Freq', 'Sausages such as Salami Freq', 'Pastrami or Smoked Turkey Breast Freq', 'Turkey", "'Lentils', 'Mutton', 'Wholemeal Noodles', 'Green Tea', 'Schnitzel', 'Brown Sugar', 'Peanuts', 'Mayonnaise', 'Persimmon', 'Apple", "'Coke', 'Vegetable Soup', 'Sausages', 'Pancake', 'Pasta', 'Sauteed vegetables', 'Plum', 'Goat Milk Yogurt', 'Orange", "'Ravioli', 'Tomatoes', 'Wholemeal Light Bread', 'Marble Cake', 'Brown Rice', 'Cold cut', 'Gilthead Bream',", "Meat Products Freq','Beef, Veal, Lamb, Pork, Steak, Golash Freq', 'Mixed Meat Dishes as", "#noSharedEnvIID=pandas.read_csv(iidsNoSharedEnv,usecols=[0],header=None,sep='\\t') #pheno=pheno[pheno.index.isin(noSharedEnvIID[0].astype(int).values)] #new code that decides which individuals to remove on the fly", "pheno: print c mb_columns=list(set(mb_columns)) pheno= pheno[mb_columns] if 'threshold' not in kwargs: threshold =", "index_col=0, header=None) df_fam = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim.fam'), delim_whitespace=True, index_col=0, header=None) df_related=df_fam[~df_fam.index.isin(df_fam_no_related.index)] pheno=pheno[(~pheno.index.isin(df_related.index))] if ('keep_sterile')", "'Diet Coke', 'Dried dates', 'Carrot Cake', 'Doritos', 'Israeli couscous', 'Pistachio', 'Date honey', 'Vinaigrette',", "in args: mb_columns_extra += ['Oil as an addition for Salads or Stews Freq','Mayonnaise", "in args: mb_columns+=drug_args else: for arg in drug_args: if arg in args: mb_columns", "activity - freq'] if 'activityTypesFreq' in args: mb_columns_extra += ['T1Activity kind','Type 1 activity", "in args: mealsColumns=[val.replace(' ','_') for val in meals] #Correct by total calories pheno.loc[:,mealsColumns]=pheno[mealsColumns][pheno[mealsColumns]!=-9].div(pheno['Calories_kcal_Total'].values,axis=0)", "'Pastrami or Smoked Turkey Breast Freq', 'Turkey 
Meatballs, Beef, Chicken Freq', 'Shish Kebab", "Freq','Peas, Green Beans or Okra Cooked Freq', 'Cauliflower or Broccoli Freq','Sweet Potato Freq',", "Salad', 'Fried eggplant', 'Salmon', 'Cashew', 'Jewish donut', 'Rugelach', 'Cake', 'Ravioli', 'Tomatoes', 'Wholemeal Light", "activity - mins','Physical activity - freq'] if 'activityTypesFreq' in args: mb_columns_extra += ['T1Activity", "Freq', 'Cucumber Freq', 'Zucchini or Eggplant Freq','Peas, Green Beans or Okra Cooked Freq',", "not in args): if 'calories' not in args: features_to_drop +=['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] else: features_to_drop +=['Age','Gender']", "known_args, 'unkown arg: %s'%(arg) for kwarg in list(kwargs.keys()): assert kwarg in known_kwargs, 'unkown", "Bread or Challah Freq', 'Light Bread Freq', 'Wholemeal or Rye Bread Freq', 'Baguette", "Freq'] if 'hunger' in args: mb_columns_extra += ['General Hunger','Morning Hunger', 'Midday Hunger', 'Evening", "'antropo', 's_stats_pheno', 'fid', 'keep_household', 'no_log', 'keep_related', 'keep_sterile', '16s', '-9', 'covars_noPCs', 'PCs', 'lactose','include_allPNP','IsGenotek','permute','meals','other','drugs', 'calories','bloodType','questionnaires','keep_missingCovars','activity','activityTypesFreq','cereals',", "os import numpy as np import sys from pysnptools.snpreader.bed import Bed import subprocess", "[c for c in pheno.columns if c[:2]=='o_' ] if 'c' in args: mb_columns", "'Challah', 'Spelled', 'Honey', 'Green beans', 'Milk', 'Peanut Butter', 'Cooked carrots', 'Lemon', 'Salty Cookies',", "Bageles Freq', 'Pita Freq', 'Saltine Crackers or Matzah Freq', 'Wholemeal Crackers Freq', 'Small", "+1e-5)&(pheno[mb_columns]!=0)).astype(int).sum() else: presence=(pheno[mb_columns]>threshold +1e-5).astype(int).sum() presence=presence[presence > len(presence)*ratio].index.values.tolist() pheno=pheno[other_columns+presence] if ('keep_related' not in args):", "'smoking' in args: mb_columns_extra += ['Currently smokes','Ever 
smoked'] if 'sweets' in args: mb_columns_extra", "for c in pheno.columns if c[:2]=='s_']] pheno_g = pheno[[c for c in pheno.columns", "'Honey', 'Green beans', 'Milk', 'Peanut Butter', 'Cooked carrots', 'Lemon', 'Salty Cookies', 'Beef', 'Meatballs',", "'unkown arg: %s'%(arg) for kwarg in list(kwargs.keys()): assert kwarg in known_kwargs, 'unkown kwarg:", "'Meatballs', 'Hamburger sandwich', 'Chicken thighs', 'Granola', 'Beet', 'Couscous', 'Beet Salad', 'Chocolate Mousse Cake',", "elif kwargs['taxa'][1]=='_': kwargs['taxa']=[kwargs['taxa']] for taxa in kwargs['taxa']: taxadf=pheno.filter(regex=(taxa)) mb_columns += taxadf.columns.values.tolist() if 'all_bac'", "np import sys from pysnptools.snpreader.bed import Bed import subprocess cleanDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/cleanData/' rawDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/rawData' pheno_fn_bac =os.path.join(cleanDataPath,'noMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes')", "taxa_level in ['otu', 'species', 'genus', 'family', 'order', 'class', 'phylum']: df_taxa = pandas.read_csv(os.path.join(PNP_16S_DIR, taxa_level+'.txt'),", "'f_', 'o_', 'c_', 'p_')] elif kwargs['taxa'][1]=='_': kwargs['taxa']=[kwargs['taxa']] for taxa in kwargs['taxa']: taxadf=pheno.filter(regex=(taxa)) mb_columns", "Crackers or Matzah Freq', 'Wholemeal Crackers Freq', 'Small Burekas Freq', 'Jachnun, Mlawah, Kubana,", "pheno=pheno[(~pheno.index.isin(df_related.index))] if ('keep_sterile') not in args: if '16s' in args: sterile_individuals = alpha_diversity_g[alpha_diversity_g", "Garlic Dressing Freq', 'Honey, Jam, fruit syrup, Maple syrup Freq', 'White or Brown", "if c[:2]=='s_']] pheno_g = pheno_nodic[[c for c in pheno_nodic.columns if c[:2]=='g_']] else: if", "'Sausages', 'Pancake', 'Pasta', 'Sauteed vegetables', 'Plum', 'Goat Milk Yogurt', 'Orange juice', 'Potatoes', 'Halva',", "if 'meatProducts' in args: mb_columns_extra += ['Egg Recipes Freq', 'Egg, Hard Boiled or", "### if 
(c[:2] not in ['c_', 'g_', 'o_', 's_', 'k_', 'p_', 'f_']): print", "in args: mb_columns += ['lactose'] if 'blood' in args: # mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine',", "['otu', 'species', 'genus', 'family', 'order', 'class', 'phylum']: df_taxa = pandas.read_csv(os.path.join(PNP_16S_DIR, taxa_level+'.txt'), sep='\\t', index_col=0)", "(not Tuna) Pickled, Dried, Smoked, Canned Freq'] if 'pastry' in args: mb_columns_extra +=", "c[:2]=='g_' ] if 'f' in args: mb_columns += [c for c in pheno.columns", "#new code that decides which individuals to remove on the fly import ForPaper.VertexCut", "pheno.columns if c[:2]=='c_' ] if 'p' in args: mb_columns += [c for c", "if c[:2]=='f_' ] if 'o' in args: mb_columns += [c for c in", "+= ['Cornflakes Freq','Granola or Bernflaks Freq','Cooked Cereal such as Oatmeal Porridge Freq', 'Rice", "'Sushi', 'Brazil nuts', 'Orange', 'Rice', 'Diet Fruit Drink', 'Corn schnitzel', 'Cappuccino', 'Low fat", "if 'no_log' in args: assert 'dic' not in args, 'dic and no_log are", "remove on the fly import ForPaper.VertexCut as vc df_household = pandas.read_csv(os.path.join(cleanDataPath, 'EnvironmentBlock.txt'), delim_whitespace=True)", "Bread', 'Wholemeal Crackers', 'Sugar Free Gum', 'Hamburger', 'Dark Beer', 'Cooked beets', 'Almonds', 'Falafel',", "'Stuffed Peppers', 'Egg', 'Pear', 'Peas', 'Pecan', 'Cooked cauliflower', 'Cooked Sweet potato', 'Butter', 'Omelette',", "mb_columns += [c for c in pheno.columns if c[:2]=='p_' ] if 'otu' in", "'Simple Cookies or Biscuits Freq', 'Ice Cream or Popsicle which contains Dairy Freq',", "Freq', 'Small Burekas Freq', 'Jachnun, Mlawah, Kubana, Cigars Freq', 'Pizza Freq'] if 'qualityOfLiving'", "Lamb, Pork, Steak, Golash Freq', 'Mixed Meat Dishes as Moussaka, Hamin, Cuba Freq',", "or Chicken Freq', 'Chicken or Turkey With Skin Freq', 'Chicken or Turkey Without", "'delivery' in args: mb_columns_extra += ['C-Section','Home delivery','Was breastfed'] if 
'dressSweetners' in args: mb_columns_extra", "Baked, Mashed, Potatoes Salad Freq', 'Fries Freq', 'Pasta or Flakes Freq'] if 'delivery'", "or Grapefruit Juice Freq', 'Apple Freq', 'Apricot Fresh or Dry, or Loquat Freq',", "Freq', 'Vegetable Soup Freq', 'Hummus Salad Freq', 'Tahini Salad Freq', 'Cooked Vegetable Salads", "with missing covars!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\" #print beforeNumParticpants-afterNumParticpants #print \"805175 has no FFQ!!!!! that is why", "(pheno_g>pheno_g.min().min()).sum(axis=1) pheno.loc[pheno.Hips==-9, 'WHR'] = np.nan pheno.loc[pheno.Waist==-9, 'WHR'] = np.nan pheno['LDLCholesterol'] = pheno['Cholesterol,total'] -", "pheno = pheno[[c for c in pheno if c[:2] not in ('s_', 'g_',", "'Whipped cream', 'Coconut milk', 'Pretzels', 'Kohlrabi', 'Eggplant Salad', 'Cooked green beans', 'Cooked mushrooms',", "for dafook in output.split('\\n')])] if ('16s' in args): pheno = pheno[[c for c", "soup', 'Wholemeal Roll', 'Canned corn', 'Salty Cheese', 'Melawach', 'White cake', 'Apple', 'Lettuce Salad',", "(c[:2] not in ['c_', 'g_', 'o_', 's_', 'k_', 'p_', 'f_']): print c alpha_diversity_s", "mb_columns_extra += ['Tomato Freq','Cooked Tomatoes, Tomato Sauce, Tomato Soup Freq', 'Red Pepper Freq',", "'hunger' in args: mb_columns_extra += ['General Hunger','Morning Hunger', 'Midday Hunger', 'Evening Hunger'] if", "after birth', 'Taking contraceptives', 'Regular period', 'Irregular period', 'No period','Hormonal replacment', 'Past breastfeeding']", "Freq', 'Diet Soda Freq', 'Regular Sodas with Sugar Freq', 'Decaffeinated Coffee Freq', 'Coffee", "or \\ # np.isnan(pheno.loc[participant,'Carbs_g']) or \\ # np.isnan(pheno.loc[participant,'Fat_g']) or \\ # np.isnan(pheno.loc[participant,'Protain_g']): #", "'Cold cut', 'Gilthead Bream', 'Garlic', 'Grapes', 'Chocolate Chip Cookies', 'Cucumber', 'Mung Bean', 'Ketchup',", "pheno.loc[pheno.Waist==-9, 'WHR'] = np.nan pheno['LDLCholesterol'] = pheno['Cholesterol,total'] - pheno['HDLCholesterol'] - 
2*pheno['Triglycerides'] if 'genotek_only'", "Coffee Freq', 'Coffee Freq', 'Herbal Tea Freq', 'Green Tea Freq', 'Regular Tea Freq',", "'calories','bloodType','questionnaires','keep_missingCovars','activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions', 'genotek_only', 'swab_only'] ffq_args = ['activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions'] drug_args=['D.lipid',", "'family', 'order', 'class', 'phylum']: df_taxa = pandas.read_csv(os.path.join(PNP_16S_DIR, taxa_level+'.txt'), sep='\\t', index_col=0) df_taxa[df_taxa<1e-3] = 1e-4", "mb_columns += [c for c in pheno.columns if c[:2]=='PC'] if 'lactose' in args:", "# sum+=1 # print participant # print pheno.loc[participant,['Calories_kcal','Carbs_g','Fat_g','Protain_g','Protain_g']] # print sum if np.isnan(pheno.loc[participant,'Age'])", "assert len(set(['all_bac','s', 'g','f','o','c','p','otu']).intersection(set(args)))==0, \\ 'taxa is mutual exclusive with all_bac,s,g,f,o,c,p,otu' if 'include_allPNP' in", "Chip Cookies', 'Cucumber', 'Mung Bean', 'Ketchup', 'Sweet Yogurt', 'Bread', 'Onion', 'Cream Cheese', 'Chicken", "in args: mb_columns += ['BMI','BPdia','BPsys','HeartRate','Height','Hips','WHR','Waist'] if 's_stats_pheno' in args: s_stats=['BMI','Cholesterol,total','WakeupGlucose','Albumin','Creatinine','HbA1C%','Height','Hips','Waist','WHR','HDLCholesterol'] #, 'Triglycerides', 'LDLCholesterol']", "Freq', 'Pear Fresh, Cooked or Canned Freq','Persimmon Freq', 'Watermelon Freq', 'Dried Fruits Freq',", "args: presence=((pheno[mb_columns]>threshold +1e-5)&(pheno[mb_columns]!=0)).astype(int).sum() else: presence=(pheno[mb_columns]>threshold +1e-5).astype(int).sum() presence=presence[presence > 
len(presence)*ratio].index.values.tolist() pheno=pheno[other_columns+presence] if ('keep_related' not", "args): if 'calories' not in args: features_to_drop +=['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] else: features_to_drop +=['Age','Gender'] if ('include_allPNP'", "in list(kwargs.keys())): assert len(set(['all_bac','s', 'g','f','o','c','p','otu']).intersection(set(args)))==0, \\ 'taxa is mutual exclusive with all_bac,s,g,f,o,c,p,otu' if", "Salad', 'Cereals', 'Yellow Cheese', 'Tea', 'Beer', 'Mozzarella Cheese', 'Fried onions', 'Ice cream', 'Cream", "'Quinoa', 'Cooked broccoli', 'Beef Cholent', 'Cracker', 'Chocolate Cookies', 'White beans', 'Cooked zucchini', 'Sweet", "else: for arg in drug_args: if arg in args: mb_columns += [arg] mb_columns_extra=[val.replace('", "if ('16s' in args): assert 'dic' not in args, '16s and dic are", "'Wholemeal Pita', 'Sunflower seeds', 'Coriander', 'Ciabatta', 'Tomato sauce', 'Heavy cream', 'Banana', 'Kif Kef',", "pheno[[c for c in pheno.columns if c[:2]=='s_']] pheno_g = pheno[[c for c in", "- freq'] if 'bloodType' in args: mb_columns_extra += ['Blood A','Blood B','Blood RH-'] if", "'Cereals', 'Yellow Cheese', 'Tea', 'Beer', 'Mozzarella Cheese', 'Fried onions', 'Ice cream', 'Cream Cake',", "support dicotomize bacteria' if 'IsGenotek' in args: assert 'covars' not in args, 'IsGenotek", "not in args, 'dic and no_log are mutually exclusive' pheno[mb_columns] = 10**pheno[mb_columns] if", "Salad Without Dressing or Oil Freq', 'Fresh Vegetable Salad With Dressing or Oil", "'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions'] drug_args=['D.lipid', 'D.All', 'D.Psychiatric', 'D.pain', 'D.CVD', 'D.GI','D.Thyroid', 'D.NSAID','D.Contraception'] meals=['Vodka or Arak',", "'Chocolate', 'Quinoa', 'Cooked broccoli', 'Beef Cholent', 'Cracker', 'Chocolate Cookies', 'White beans', 'Cooked zucchini',", "if 
('include_allPNP' not in args) and ('PCs' not in args): features_to_drop+=['PC1','PC2','PC3','PC4','PC5'] pheno=pheno.drop(features_to_drop,axis=1) if", "'Coffee Freq', 'Herbal Tea Freq', 'Green Tea Freq', 'Regular Tea Freq', 'Beer Freq',", "known_kwargs = ['ratio', 'threshold','taxa'] for arg in args: assert arg in known_args, 'unkown", "'Mixed Chicken or Turkey Dishes Freq', 'Beef or Chicken Soup Freq', 'Internal Organs", "'16s and dic are mutually exclusive' if ('taxa' in list(kwargs.keys())): assert len(set(['all_bac','s', 'g','f','o','c','p','otu']).intersection(set(args)))==0,", "Freq','Beef, Veal, Lamb, Pork, Steak, Golash Freq', 'Mixed Meat Dishes as Moussaka, Hamin,", "pheno.columns if c[:2] not in ['s_','g_','f_','o_','c_','p_']] if 'dic' in args: presence=((pheno[mb_columns]>threshold +1e-5)&(pheno[mb_columns]!=0)).astype(int).sum() else:", "df_household[df_household.index.isin(pheno.index)] remove_inds = df_household.index[vc.VertexCut().work(df_household.values, 0.5)] pheno=pheno[~pheno.index.isin(remove_inds)] if 'keep_missingCovars' not in args: #One participant", "Moussaka, Hamin, Cuba Freq', 'Mixed Chicken or Turkey Dishes Freq', 'Beef or Chicken", "Hunger'] if 'legumes' in args: mb_columns_extra += ['Falafel in Pita Bread Freq', 'Cooked", "['General Hunger','Morning Hunger', 'Midday Hunger', 'Evening Hunger'] if 'legumes' in args: mb_columns_extra +=", "'Pesto', 'Sugar substitute', 'Cheesecake', 'Raisins', 'Chocolate', 'Quinoa', 'Cooked broccoli', 'Beef Cholent', 'Cracker', 'Chocolate", "corn', 'Salty Cheese', 'Melawach', 'White cake', 'Apple', 'Lettuce Salad', 'Cereals', 'Yellow Cheese', 'Tea',", "mb_columns+=mb_columns_extra if 'meals' in args: mealsColumns=[val.replace(' ','_') for val in meals] #Correct by", "'Tahini', 'Chicken breast', 'Steak', 'Light Bread', 'Wholemeal Crackers', 'Sugar Free Gum', 'Hamburger', 'Dark", "'Milk', 'Peanut Butter', 'Cooked carrots', 'Lemon', 'Salty Cookies', 'Beef', 'Meatballs', 'Hamburger sandwich', 'Chicken", 
"bacteria' if 'IsGenotek' in args: assert 'covars' not in args, 'IsGenotek and covars", "Roll', 'Canned corn', 'Salty Cheese', 'Melawach', 'White cake', 'Apple', 'Lettuce Salad', 'Cereals', 'Yellow", "activity','Physical activity - mins','Physical activity - freq'] if 'activityTypesFreq' in args: mb_columns_extra +=", "dicotomize bacteria' if 'IsGenotek' in args: assert 'covars' not in args, 'IsGenotek and", "'p_', 'f_']): print c alpha_diversity_s = (pheno_s>pheno_s.min().min()).sum(axis=1) alpha_diversity_g = (pheno_g>pheno_g.min().min()).sum(axis=1) pheno.loc[pheno.Hips==-9, 'WHR'] =", "args: mb_columns += [c for c in pheno.columns if c[:4]=='OTU_' ] if 'no_log'", "Freq','Cooked Cereal such as Oatmeal Porridge Freq', 'Rice Freq','Couscous, Burgul, Mamaliga, Groats Freq',", "extract('s','include_allPNP','covars')#'include_allPNP','keep_household','ffq','keep_related')#'include_allPNP', print(phenoAll.shape) print(phenoAll.columns) phenoAll = extract('s','include_allPNP') print(phenoAll.shape) print(phenoAll.columns) phenoChip = extract('keep_household','s','keep_related') print(phenoChip.shape) print(phenoChip.columns)", "'_'\"%os.path.join(rawDataPath,'tmp','dfukim.txt')) pheno =pheno[~pheno.index.isin([int(dafook) for dafook in output.split('\\n')])] if ('16s' in args): pheno =", "pheno_nodic[[c for c in pheno_nodic.columns if c[:2]=='s_']] pheno_g = pheno_nodic[[c for c in", "'f_', 'o_', 'c_', 'p_')]] for taxa_level in ['otu', 'species', 'genus', 'family', 'order', 'class',", "Freq', 'Diet Juice Freq', 'Juice Freq', 'Diet Soda Freq', 'Regular Sodas with Sugar", "kind','Type 3 activity - freq'] if 'bloodType' in args: mb_columns_extra += ['Blood A','Blood", "or Matzah Freq', 'Wholemeal Crackers Freq', 'Small Burekas Freq', 'Jachnun, Mlawah, Kubana, Cigars", "import subprocess cleanDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/cleanData/' rawDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/rawData' pheno_fn_bac 
=os.path.join(cleanDataPath,'noMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') #pheno_fn_bac =os.path.join(cleanDataPath,'allChipPhenotypes_nodfukimWith5PCair.phenotypes') pheno_fn_bacDic=os.path.join(cleanDataPath,'dicNoMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') pheno_fn_bacAllPNP=os.path.join(rawDataPath,'allPNPPhenotypes.phenotypes') iidsNoSharedEnv='/net/mraid08/export/genie/Microbiome/Analyses/PNPChip/cleanData/PNP_autosomal_clean2_nodfukim_NoCouples.txt' PNP_16S_DIR =", "= -4 else: threshold=kwargs['threshold'] if 'ratio' in kwargs: ratio=kwargs['ratio'] mb_columns = [c for", "mb_columns_extra += ['Ordinary Bread or Challah Freq', 'Light Bread Freq', 'Wholemeal or Rye", "# 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', # 'RDW','Sodium','TSH','WBC','AlkalinePhosphatase','GGT','LDH','Iron','LDLCholesterol','Magnesium', # 'Triglycerides','TotalProtein','TotalBilirubin','Urea'] mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', 'RDW','Sodium','TSH','WBC','LDLCholesterol'] if", "are mutually exclusive' if ('taxa' in list(kwargs.keys())): assert len(set(['all_bac','s', 'g','f','o','c','p','otu']).intersection(set(args)))==0, \\ 'taxa is", "Canned Freq'] if 'pastry' in args: mb_columns_extra += ['Ordinary Bread or Challah Freq',", "in args: #noSharedEnvIID=pandas.read_csv(iidsNoSharedEnv,usecols=[0],header=None,sep='\\t') #pheno=pheno[pheno.index.isin(noSharedEnvIID[0].astype(int).values)] #new code that decides which individuals to remove on", "'Carrot Cake', 'Doritos', 'Israeli couscous', 'Pistachio', 'Date honey', 'Vinaigrette', 'Bamba', 'Dark Chocolate', 'Turkey", "['dic', 'all_bac', 's', 'g','f','o','c','p','otu', 'all_non_bac', 'covars', 
'blood', 'glucose', 'ffq', 'antropo', 's_stats_pheno', 'fid', 'keep_household',", "Torte Cakes, Chocolate Cake Freq', 'Fruit Pie or Cake Freq', 'Coated or Stuffed", "np.nan, inplace=True) if 'permute' in args: pheno=pandas.DataFrame(pheno.values[np.random.permutation(pheno.shape[0])],index=pheno.index,columns=pheno.columns) return pheno if __name__==\"__main__\": # pheno=extract('dic','covars','keep_household',\"pastry\",ratio=0.2)#'all_bac'", "Cuba Freq', 'Mixed Chicken or Turkey Dishes Freq', 'Beef or Chicken Soup Freq',", "Cooked, Carrot Juice Freq', 'Corn Freq', 'Parsley, Celery, Fennel, Dill, Cilantro, Green Onion", "kind', 'Type 2 activity - freq','T3Activity kind','Type 3 activity - freq'] if 'bloodType'", "in ['otu', 'species', 'genus', 'family', 'order', 'class', 'phylum']: df_taxa = pandas.read_csv(os.path.join(PNP_16S_DIR, taxa_level+'.txt'), sep='\\t',", "= [c for c in pheno.columns if c[:2] not in ['s_','g_','f_','o_','c_','p_']] if 'dic'", "'Avocado', 'Parsley', 'Coated peanuts', 'Sugar', 'Smoked Salmon', 'Melon', 'Roll', 'Whipped cream', 'Coconut milk',", "breast', 'Steak', 'Light Bread', 'Wholemeal Crackers', 'Sugar Free Gum', 'Hamburger', 'Dark Beer', 'Cooked", "not in args: #One participant 244624 has no 'Calories_kcal','Carbs_g','Fat_g','Protain_g' #3 participant 86356,762339,805175 have", "Freq', 'Regular Tea Freq', 'Beer Freq', 'Sweet Dry Wine, Cocktails Freq', 'Alcoholic Drinks", "'MeanGlucoseResponse','MeanBreadResponse','MeanBreadButterResponse'] if 'ffq' in args: mb_columns += ['Alcoholic_Drinks_Freq','Cigarretes_per_day','Coffee_Freq','Start_smoking_age'] if 'antropo' in args: mb_columns", "Burekas Freq', 'Jachnun, Mlawah, Kubana, Cigars Freq', 'Pizza Freq'] if 'qualityOfLiving' in args:", "Cakes, Chocolate Cake Freq', 'Fruit Pie or Cake Freq', 'Coated or Stuffed Cookies,", "'womenOnlyQuestions' in args: mb_columns_extra += ['Is pregnant','Is breastfeeding','Is after birth', 'Taking contraceptives', 'Regular", "in known_args, 'unkown arg: 
%s'%(arg) for kwarg in list(kwargs.keys()): assert kwarg in known_kwargs,", "Freq', 'Green Pepper Freq', 'Cucumber Freq', 'Zucchini or Eggplant Freq','Peas, Green Beans or", "'otu' in args: mb_columns += [c for c in pheno.columns if c[:4]=='OTU_' ]", "'dic' not in args, 'dic and no_log are mutually exclusive' pheno[mb_columns] = 10**pheno[mb_columns]", "Rice', 'Cold cut', 'Gilthead Bream', 'Garlic', 'Grapes', 'Chocolate Chip Cookies', 'Cucumber', 'Mung Bean',", "['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', # 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', # 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', # 'RDW','Sodium','TSH','WBC','AlkalinePhosphatase','GGT','LDH','Iron','LDLCholesterol','Magnesium', # 'Triglycerides','TotalProtein','TotalBilirubin','Urea'] mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%',", "'c_', 'p_')]] for taxa_level in ['otu', 'species', 'genus', 'family', 'order', 'class', 'phylum']: df_taxa", "c in pheno.columns if c[:2]=='p_' ] if 'otu' in args: mb_columns += [c", "remove_inds = df_household.index[vc.VertexCut().work(df_household.values, 0.5)] pheno=pheno[~pheno.index.isin(remove_inds)] if 'keep_missingCovars' not in args: #One participant 244624", "for c in pheno_nodic.columns if c[:2]=='s_']] pheno_g = pheno_nodic[[c for c in pheno_nodic.columns", "'Red pepper', 'Bagel', 'Entrecote', 'Cottage cheese', 'Oil', 'Natural Yogurt', 'Walnuts', 'Edamame', 'Majadra', 'Oatmeal',", "Matzah Freq', 'Wholemeal Crackers Freq', 'Small Burekas Freq', 'Jachnun, Mlawah, Kubana, Cigars Freq',", "'Pistachio', 'Date honey', 'Vinaigrette', 'Bamba', 'Dark Chocolate', 'Turkey Shawarma', 'Olive oil', #u'Parmesan\\xc2\\xa0cheese', 'Guacamole',", 
"pheno.loc[pheno['IsGenotek']==1] if 'swab_only' in args: pheno = pheno.loc[pheno['IsGenotek']==0] mb_columns = [] if 'taxa'", "[c for c in pheno.columns if c[:2]=='c_' ] if 'p' in args: mb_columns", "['Blood A','Blood B','Blood RH-'] if 'cereals' in args: mb_columns_extra += ['Cornflakes Freq','Granola or", "pheno[mb_columns] = 10**pheno[mb_columns] if 'all_non_bac' in args: args=list(args)+['covars','blood','glucose','ffq','antropo'] mb_columns += ['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] if 'include_allPNP'", "in args: mb_columns_extra += ['Ordinary Bread or Challah Freq', 'Light Bread Freq', 'Wholemeal", "Maple syrup Freq', 'White or Brown Sugar Freq', 'Artificial Sweeteners Freq',] if 'drinks'", "'Feta Cheese', 'Mandarin', 'Pesto', 'Sugar substitute', 'Cheesecake', 'Raisins', 'Chocolate', 'Quinoa', 'Cooked broccoli', 'Beef", "('16s' in args): assert 'dic' not in args, '16s and dic are mutually", "Veal, Lamb, Pork, Steak, Golash Freq', 'Mixed Meat Dishes as Moussaka, Hamin, Cuba", "Freq', 'Mixed Chicken or Turkey Dishes Freq', 'Beef or Chicken Soup Freq', 'Internal", "'Wholemeal or Rye Bread Freq', 'Baguette Freq', 'Roll or Bageles Freq', 'Pita Freq',", "c in pheno.columns if c[:2]=='s_' ] if 'g' in args: mb_columns += [c", "args): pheno = pheno[[c for c in pheno if c[:2] not in ('s_',", "in Pita Bread Freq', 'Falafel in Pita version 2 Freq','Processed Meat Products Freq','Beef,", "Smoked, Canned Freq'] if 'pastry' in args: mb_columns_extra += ['Ordinary Bread or Challah", "'Falafel in Pita version 2 Freq','Processed Meat Products Freq','Beef, Veal, Lamb, Pork, Steak,", "as Rogallach, Croissant or Donut Freq', 'Cake, Torte Cakes, Chocolate Cake Freq', 'Fruit", "Fresh or Dry, or Loquat Freq', 'Grapes or Raisins Freq', 'Banana Freq', 'Melon", "pheno_fn_bacAllPNP=os.path.join(rawDataPath,'allPNPPhenotypes.phenotypes') 
iidsNoSharedEnv='/net/mraid08/export/genie/Microbiome/Analyses/PNPChip/cleanData/PNP_autosomal_clean2_nodfukim_NoCouples.txt' PNP_16S_DIR = '/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/16S' glycemicStatusPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/glycemic_status.csv' def extract(*args,**kwargs): known_args = ['dic', 'all_bac', 's',", "'Beet Salad', 'Chocolate Mousse Cake', 'Sweet Roll', 'Danish', 'Coffee', 'Pasta Salad', 'Cuba', 'Chicken", "'Jachnun', 'Turkey', 'Sushi', 'Brazil nuts', 'Orange', 'Rice', 'Diet Fruit Drink', 'Corn schnitzel', 'Cappuccino',", "known_args+= ffq_args known_args+= drug_args known_kwargs = ['ratio', 'threshold','taxa'] for arg in args: assert", "Freq','Granola or Bernflaks Freq','Cooked Cereal such as Oatmeal Porridge Freq', 'Rice Freq','Couscous, Burgul,", "in list(kwargs.keys()): assert kwarg in known_kwargs, 'unkown kwarg: %s'%(kwarg) if ('16s' in args):", "'other' in args: #AddingIrisGlucose df_glucose = pandas.read_csv(glycemicStatusPath).set_index('RegNum') pheno = df_glucose.merge(pheno, left_index=True, right_index=True,how='right') mb_columns", "delivery','Was breastfed'] if 'dressSweetners' in args: mb_columns_extra += ['Oil as an addition for", "args: mb_columns_extra += ['Milk or Dark Chocolate Freq', 'Salty Snacks Freq', 'Cheese Cakes", "'Lettuce Freq','Carrots, Fresh or Cooked, Carrot Juice Freq', 'Corn Freq', 'Parsley, Celery, Fennel,", "Freq', 'Tahini Salad Freq', 'Cooked Vegetable Salads Freq', 'Pickled Vegetables Freq', 'Olives Freq']", "Soup Freq', 'Internal Organs Freq', 'Fish Cooked, Baked or Grilled Freq', 'Fried Fish", "if __name__==\"__main__\": # pheno=extract('dic','covars','keep_household',\"pastry\",ratio=0.2)#'all_bac' phenoAll = extract('s','include_allPNP','covars')#'include_allPNP','keep_household','ffq','keep_related')#'include_allPNP', print(phenoAll.shape) print(phenoAll.columns) phenoAll = extract('s','include_allPNP') print(phenoAll.shape)", "\\ # 
np.isnan(pheno.loc[participant,'Protain_g']): # sum+=1 # print participant # print pheno.loc[participant,['Calories_kcal','Carbs_g','Fat_g','Protain_g','Protain_g']] # print", "Cookies', 'Cucumber', 'Mung Bean', 'Ketchup', 'Sweet Yogurt', 'Bread', 'Onion', 'Cream Cheese', 'Chicken soup',", "Porridge Freq', 'Rice Freq','Couscous, Burgul, Mamaliga, Groats Freq', 'Potatoes Boiled, Baked, Mashed, Potatoes", "#u'Parmesan\\xc2\\xa0cheese', 'Guacamole', 'Coleslaw', 'Americano', 'Pesek Zman snack', 'Green onions', 'Mushrooms', 'Lemon juice', 'Canned", "Cream Cakes Freq', 'Yeast Cakes and Cookies as Rogallach, Croissant or Donut Freq',", "'Grapes', 'Chocolate Chip Cookies', 'Cucumber', 'Mung Bean', 'Ketchup', 'Sweet Yogurt', 'Bread', 'Onion', 'Cream", "= pheno[[c for c in pheno.columns if c[:2]=='s_']] pheno_g = pheno[[c for c", "count_A1=True)#.read() df_fam_no_related = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated.fam'), delim_whitespace=True, index_col=0, header=None) df_fam = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim.fam'), delim_whitespace=True,", "df_fam = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim.fam'), delim_whitespace=True, index_col=0, header=None) df_related=df_fam[~df_fam.index.isin(df_fam_no_related.index)] pheno=pheno[(~pheno.index.isin(df_related.index))] if ('keep_sterile') not in", "print c mb_columns=list(set(mb_columns)) pheno= pheno[mb_columns] if 'threshold' not in kwargs: threshold = -4", "if c[:2]=='g_']] else: if 'include_allPNP' in args: pheno =pandas.read_csv(pheno_fn_bacAllPNP,sep='\\t') else: pheno =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno.set_index('IID',", "'Turkey Meatballs, Beef, Chicken Freq', 'Shish Kebab in Pita Bread Freq', 'Falafel in", "for arg in drug_args: if arg in args: mb_columns += [arg] mb_columns_extra=[val.replace(' ','_')", "# if 
set(['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'])<=set(pheno.columns.values): keep_inds=pheno.loc[:,['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek']].replace(-9, np.nan).dropna().index.values beforeNumParticpants=pheno.shape[0] pheno=pheno.loc[keep_inds] afterNumParticpants=pheno.shape[0] if beforeNumParticpants-afterNumParticpants>0: pass #print \"Removing", "if 'hunger' in args: mb_columns_extra += ['General Hunger','Morning Hunger', 'Midday Hunger', 'Evening Hunger']", "'ffq' in args: mb_columns += ['Alcoholic_Drinks_Freq','Cigarretes_per_day','Coffee_Freq','Start_smoking_age'] if 'antropo' in args: mb_columns += ['BMI','BPdia','BPsys','HeartRate','Height','Hips','WHR','Waist']", "as Oatmeal Porridge Freq', 'Rice Freq','Couscous, Burgul, Mamaliga, Groats Freq', 'Potatoes Boiled, Baked,", "= pheno[[c for c in pheno.columns if c[:2]=='g_']] ### for c in pheno:", "Vegetables Freq', 'Olives Freq'] if 'womenOnlyQuestions' in args: mb_columns_extra += ['Is pregnant','Is breastfeeding','Is", "in args: sterile_individuals = alpha_diversity_g[alpha_diversity_g < 4].index else: sterile_individuals = alpha_diversity_s[alpha_diversity_s < 15].index", "Bean', 'Ketchup', 'Sweet Yogurt', 'Bread', 'Onion', 'Cream Cheese', 'Chicken soup', 'Wholemeal Roll', 'Canned", "if 'blood' in args: # mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', # 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', # 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', #", "+= [c for c in pheno.columns if c[:2]=='g_' ] if 'f' in args:", "Wine, Cocktails Freq', 'Alcoholic Drinks Freq'] if 'fruits' in args: mb_columns_extra += ['Mandarin", "participant # print pheno.loc[participant,['Calories_kcal','Carbs_g','Fat_g','Protain_g','Protain_g']] # print sum if np.isnan(pheno.loc[participant,'Age']) or 
np.isnan(pheno.loc[participant,'Gender']) : print(\"Participant", "'Sausages Freq', 'Sausages such as Salami Freq', 'Pastrami or Smoked Turkey Breast Freq',", "'Heavy cream', 'Banana', 'Kif Kef', 'Mustard', 'Coke', 'Vegetable Soup', 'Sausages', 'Pancake', 'Pasta', 'Sauteed", "'all_bac' in args: args=list(args)+['s','g','f','o','c','p'] if 's' in args: mb_columns += [c for c", "'Regular Sodas with Sugar Freq', 'Decaffeinated Coffee Freq', 'Coffee Freq', 'Herbal Tea Freq',", "period', 'Irregular period', 'No period','Hormonal replacment', 'Past breastfeeding'] if 'other' in args: #AddingIrisGlucose", "('covars_noPCs' not in args): features_to_drop += ['IsGenotek'] if ('covars' not in args) and", "if 'genotek_only' in args: pheno = pheno.loc[pheno['IsGenotek']==1] if 'swab_only' in args: pheno =", "Freq', 'Orange or Grapefruit Juice Freq', 'Apple Freq', 'Apricot Fresh or Dry, or", "if 'womenOnlyQuestions' in args: mb_columns_extra += ['Is pregnant','Is breastfeeding','Is after birth', 'Taking contraceptives',", "Mamaliga, Groats Freq', 'Potatoes Boiled, Baked, Mashed, Potatoes Salad Freq', 'Fries Freq', 'Pasta", "taxadf.columns.values.tolist() if 'all_bac' in args: args=list(args)+['s','g','f','o','c','p'] if 's' in args: mb_columns += [c", "'Couscous', 'Beet Salad', 'Chocolate Mousse Cake', 'Sweet Roll', 'Danish', 'Coffee', 'Pasta Salad', 'Cuba',", "['C-Section','Home delivery','Was breastfed'] if 'dressSweetners' in args: mb_columns_extra += ['Oil as an addition", "Freq', 'Cheese Cakes or Cream Cakes Freq', 'Yeast Cakes and Cookies as Rogallach,", "'Regular period', 'Irregular period', 'No period','Hormonal replacment', 'Past breastfeeding'] if 'other' in args:", "mb_columns = [c for c in pheno.columns if c[:2] in ['s_','g_','f_','o_','c_','p_']] other_columns =", "'Roasted eggplant', 'Baguette', 'Lentil Soup', 'Tzfatit Cheese', 'Nectarine', 'Chicken legs', 'Nuts', 'Goat Cheese',", "mb_columns_extra += ['Blood A','Blood B','Blood RH-'] if 'cereals' in args: 
mb_columns_extra += ['Cornflakes", "= pheno.merge(df_taxa, left_index=True, right_index=True) pheno_s = pheno[[c for c in pheno.columns if c[:2]=='s_']]", "version 2 Freq','Processed Meat Products Freq','Beef, Veal, Lamb, Pork, Steak, Golash Freq', 'Mixed", "Freq', 'Yeast Cakes and Cookies as Rogallach, Croissant or Donut Freq', 'Cake, Torte", "Pork, Steak, Golash Freq', 'Mixed Meat Dishes as Moussaka, Hamin, Cuba Freq', 'Mixed", "for val in meals] #Correct by total calories pheno.loc[:,mealsColumns]=pheno[mealsColumns][pheno[mealsColumns]!=-9].div(pheno['Calories_kcal_Total'].values,axis=0) pheno.replace(np.nan, 0,inplace=True) mb_columns +=", "['Nectar, Cider Freq', 'Diet Juice Freq', 'Juice Freq', 'Diet Soda Freq', 'Regular Sodas", "or Turkey With Skin Freq', 'Chicken or Turkey Without Skin Freq', 'Sausages Freq',", "Grains, Watermelon Seeds Freq', 'Nuts, almonds, pistachios Freq','Peanuts Freq'] if 'vegetables' in args:", "args: mb_columns += [c for c in pheno.columns if c[:2]=='o_' ] if 'c'", "+= ['C-Section','Home delivery','Was breastfed'] if 'dressSweetners' in args: mb_columns_extra += ['Oil as an", "Fresh, Cooked or Canned Freq','Persimmon Freq', 'Watermelon Freq', 'Dried Fruits Freq', 'Fruit Salad", "pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated.fam'), delim_whitespace=True, index_col=0, header=None) df_fam = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim.fam'), delim_whitespace=True, index_col=0, header=None) df_related=df_fam[~df_fam.index.isin(df_fam_no_related.index)]", "['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', 'RDW','Sodium','TSH','WBC','LDLCholesterol'] if 'glucose' in args: mb_columns += 
['95P_Glucose','Glucose_Noise','Max_Glucose','Median_Glucose','WakeupGlucose', 'MeanGlucoseResponse','MeanBreadResponse','MeanBreadButterResponse'] if", "Meat Free Products Freq'] if 'meatProducts' in args: mb_columns_extra += ['Egg Recipes Freq',", "syrup, Maple syrup Freq', 'White or Brown Sugar Freq', 'Artificial Sweeteners Freq',] if", "'Melon', 'Roll', 'Whipped cream', 'Coconut milk', 'Pretzels', 'Kohlrabi', 'Eggplant Salad', 'Cooked green beans',", "'Olive oil', #u'Parmesan\\xc2\\xa0cheese', 'Guacamole', 'Coleslaw', 'Americano', 'Pesek Zman snack', 'Green onions', 'Mushrooms', 'Lemon", "[c for c in pheno.columns if c[:2] in ['s_','g_','f_','o_','c_','p_']] other_columns = [c for", "Vegetable Salad Without Dressing or Oil Freq', 'Fresh Vegetable Salad With Dressing or", "Freq'] if 'pastry' in args: mb_columns_extra += ['Ordinary Bread or Challah Freq', 'Light", "mb_columns_extra] mb_columns+=mb_columns_extra if 'meals' in args: mealsColumns=[val.replace(' ','_') for val in meals] #Correct", "'Peach, Nectarine, Plum Freq', 'Pear Fresh, Cooked or Canned Freq','Persimmon Freq', 'Watermelon Freq',", "'Potato chips', 'White Cheese', 'French fries', 'Wholemeal Bread', 'Tuna Salad', 'Chocolate spread', 'Kebab',", "if 'dic' in args: presence=((pheno[mb_columns]>threshold +1e-5)&(pheno[mb_columns]!=0)).astype(int).sum() else: presence=(pheno[mb_columns]>threshold +1e-5).astype(int).sum() presence=presence[presence > len(presence)*ratio].index.values.tolist() pheno=pheno[other_columns+presence]", "=pandas.read_csv(pheno_fn_bacDic,sep='\\t') pheno.set_index('IID', inplace=True, drop=True) pheno_nodic =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno_nodic.set_index('IID', inplace=True, drop=True) pheno_s = pheno_nodic[[c for", "in args) and ('PCs' not in args): features_to_drop+=['PC1','PC2','PC3','PC4','PC5'] pheno=pheno.drop(features_to_drop,axis=1) if ('-9' not in", "in known_kwargs, 'unkown kwarg: %s'%(kwarg) if ('16s' in args): assert 'dic' not in", "'s' in args: 
mb_columns += [c for c in pheno.columns if c[:2]=='s_' ]", "Freq', 'Hummus Salad Freq', 'Tahini Salad Freq', 'Cooked Vegetable Salads Freq', 'Pickled Vegetables", "= df_glucose.merge(pheno, left_index=True, right_index=True,how='right') mb_columns +=['median_Without_BMI_ALT_Overall','WakeupGlucose','BMI','VegeterianScale'] pheno.loc[pheno['VegeterianScale']<0, 'VegeterianScale']=np.nan if 'drugs' in args: mb_columns+=drug_args", "Dishes Freq', 'Beef or Chicken Soup Freq', 'Internal Organs Freq', 'Fish Cooked, Baked", "15].index pheno=pheno[~pheno.index.isin(sterile_individuals)] if 'keep_household' not in args: #noSharedEnvIID=pandas.read_csv(iidsNoSharedEnv,usecols=[0],header=None,sep='\\t') #pheno=pheno[pheno.index.isin(noSharedEnvIID[0].astype(int).values)] #new code that decides", "Freq', 'Regular Sodas with Sugar Freq', 'Decaffeinated Coffee Freq', 'Coffee Freq', 'Herbal Tea", "mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', # 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', # 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', # 'RDW','Sodium','TSH','WBC','AlkalinePhosphatase','GGT','LDH','Iron','LDLCholesterol','Magnesium', # 'Triglycerides','TotalProtein','TotalBilirubin','Urea'] mb_columns +=", "'Chicken breast', 'Steak', 'Light Bread', 'Wholemeal Crackers', 'Sugar Free Gum', 'Hamburger', 'Dark Beer',", "'swab_only' in args: pheno = pheno.loc[pheno['IsGenotek']==0] mb_columns = [] if 'taxa' in kwargs:", "'s_stats_pheno' in args: s_stats=['BMI','Cholesterol,total','WakeupGlucose','Albumin','Creatinine','HbA1C%','Height','Hips','Waist','WHR','HDLCholesterol'] #, 'Triglycerides', 'LDLCholesterol'] mb_columns+=s_stats mb_columns=list(set(mb_columns)) if 'fid' in args:", "crackers', 'Wafers', 'Lettuce', 'Rice Noodles', 'Lentils', 'Mutton', 'Wholemeal Noodles', 'Green Tea', 'Schnitzel', 'Brown", "'Light Bread', 
'Wholemeal Crackers', 'Sugar Free Gum', 'Hamburger', 'Dark Beer', 'Cooked beets', 'Almonds',", "Canned Freq','Persimmon Freq', 'Watermelon Freq', 'Dried Fruits Freq', 'Fruit Salad Freq'] if 'hunger'", "'all_non_bac', 'covars', 'blood', 'glucose', 'ffq', 'antropo', 's_stats_pheno', 'fid', 'keep_household', 'no_log', 'keep_related', 'keep_sterile', '16s',", "not in ['s_','g_','f_','o_','c_','p_']] if 'dic' in args: presence=((pheno[mb_columns]>threshold +1e-5)&(pheno[mb_columns]!=0)).astype(int).sum() else: presence=(pheno[mb_columns]>threshold +1e-5).astype(int).sum() presence=presence[presence", "for taxa in kwargs['taxa']: taxadf=pheno.filter(regex=(taxa)) mb_columns += taxadf.columns.values.tolist() if 'all_bac' in args: args=list(args)+['s','g','f','o','c','p']", "mb_columns += ['lactose'] if 'blood' in args: # mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', # 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%',", "'Roll or Bageles Freq', 'Pita Freq', 'Saltine Crackers or Matzah Freq', 'Wholemeal Crackers", "'Pickled cucumber', 'Soymilk', 'Dates', 'Croissant', 'Biscuit', 'Potato chips', 'White Cheese', 'French fries', 'Wholemeal", "arg: %s'%(arg) for kwarg in list(kwargs.keys()): assert kwarg in known_kwargs, 'unkown kwarg: %s'%(kwarg)", "Salad', 'Cuba', 'Chicken Liver', 'Sweet Challah', 'Minced meat', 'Chocolate cake', 'Diet Coke', 'Dried", "'WHR'] = np.nan pheno['LDLCholesterol'] = pheno['Cholesterol,total'] - pheno['HDLCholesterol'] - 2*pheno['Triglycerides'] if 'genotek_only' in", "Freq'] if 'womenOnlyQuestions' in args: mb_columns_extra += ['Is pregnant','Is breastfeeding','Is after birth', 'Taking", "drop=True) pheno_nodic =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno_nodic.set_index('IID', inplace=True, drop=True) pheno_s = pheno_nodic[[c for c in pheno_nodic.columns", "in args: mb_columns += [c for c in pheno.columns if c[:2]=='s_' ] if", 
"Freq','Peanuts Freq'] if 'vegetables' in args: mb_columns_extra += ['Tomato Freq','Cooked Tomatoes, Tomato Sauce,", "=pandas.read_csv(pheno_fn_bac,sep='\\t') pheno.set_index('IID', inplace=True, drop=True) if 'include_allPNP'in args: status, output = subprocess.getstatusoutput(\"cut -f 1", "'Garlic', 'Grapes', 'Chocolate Chip Cookies', 'Cucumber', 'Mung Bean', 'Ketchup', 'Sweet Yogurt', 'Bread', 'Onion',", "'Beer', 'Mozzarella Cheese', 'Fried onions', 'Ice cream', 'Cream Cake', 'Green cabbage', 'Olives', 'Balsamic", "left_index=True, right_index=True,how='right') mb_columns +=['median_Without_BMI_ALT_Overall','WakeupGlucose','BMI','VegeterianScale'] pheno.loc[pheno['VegeterianScale']<0, 'VegeterianScale']=np.nan if 'drugs' in args: mb_columns+=drug_args else: for", "Dressing or Oil Freq', 'Fresh Vegetable Salad With Dressing or Oil Freq', 'Avocado", "print(pheno.shape) sum=0 for participant in pheno[['Age','Gender']].index.values: # if np.isnan(pheno.loc[participant,'Calories_kcal']) or \\ # np.isnan(pheno.loc[participant,'Carbs_g'])", "Milk', 'Pickled cucumber', 'Soymilk', 'Dates', 'Croissant', 'Biscuit', 'Potato chips', 'White Cheese', 'French fries',", "or Cream Cakes Freq', 'Yeast Cakes and Cookies as Rogallach, Croissant or Donut", "almonds, pistachios Freq','Peanuts Freq'] if 'vegetables' in args: mb_columns_extra += ['Tomato Freq','Cooked Tomatoes,", "Mousse Cake', 'Sweet Roll', 'Danish', 'Coffee', 'Pasta Salad', 'Cuba', 'Chicken Liver', 'Sweet Challah',", "exclusive' if ('taxa' in list(kwargs.keys())): assert len(set(['all_bac','s', 'g','f','o','c','p','otu']).intersection(set(args)))==0, \\ 'taxa is mutual exclusive", "args: assert 'dic' not in args, 'dic and no_log are mutually exclusive' pheno[mb_columns]", "3 activity - freq'] if 'bloodType' in args: mb_columns_extra += ['Blood A','Blood B','Blood", "pheno=pandas.DataFrame(pheno.values[np.random.permutation(pheno.shape[0])],index=pheno.index,columns=pheno.columns) return pheno if 
__name__==\"__main__\": # pheno=extract('dic','covars','keep_household',\"pastry\",ratio=0.2)#'all_bac' phenoAll = extract('s','include_allPNP','covars')#'include_allPNP','keep_household','ffq','keep_related')#'include_allPNP', print(phenoAll.shape) print(phenoAll.columns) phenoAll", "Boiled or Soft Freq', 'Schnitzel Turkey or Chicken Freq', 'Chicken or Turkey With", "'Cheese Cakes or Cream Cakes Freq', 'Yeast Cakes and Cookies as Rogallach, Croissant", "drug_args known_kwargs = ['ratio', 'threshold','taxa'] for arg in args: assert arg in known_args,", "in args if 'dic' in args: pheno =pandas.read_csv(pheno_fn_bacDic,sep='\\t') pheno.set_index('IID', inplace=True, drop=True) pheno_nodic =pandas.read_csv(pheno_fn_bac,sep='\\t')", "c in pheno_nodic.columns if c[:2]=='s_']] pheno_g = pheno_nodic[[c for c in pheno_nodic.columns if", "+= ['T1Activity kind','Type 1 activity - freq','T2Activity kind', 'Type 2 activity - freq','T3Activity", "Freq', 'Light Bread Freq', 'Wholemeal or Rye Bread Freq', 'Baguette Freq', 'Roll or", "in args: assert '16s' in args if 'dic' in args: pheno =pandas.read_csv(pheno_fn_bacDic,sep='\\t') pheno.set_index('IID',", "if 'sweets' in args: mb_columns_extra += ['Milk or Dark Chocolate Freq', 'Salty Snacks", "mushrooms', 'Watermelon', 'Grilled cheese', 'Bissli', 'Pullet', 'Hummus', 'Chinese Chicken Noodles', 'Shakshouka', 'Tahini', 'Chicken", "for c in df_household.columns if int(c) in pheno.index]] df_household = df_household[df_household.index.isin(pheno.index)] remove_inds =", "mutually exclusive' pheno[mb_columns] = 10**pheno[mb_columns] if 'all_non_bac' in args: args=list(args)+['covars','blood','glucose','ffq','antropo'] mb_columns += ['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek']", "'Roll', 'Whipped cream', 'Coconut milk', 'Pretzels', 'Kohlrabi', 'Eggplant Salad', 'Cooked green beans', 'Cooked", "not in args or ('PCs') in args: mb_columns += [c for c in", "print sum if np.isnan(pheno.loc[participant,'Age']) or 
np.isnan(pheno.loc[participant,'Gender']) : print(\"Participant %s, age %s, gender %s\"", "'genotek_only', 'swab_only'] ffq_args = ['activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions'] drug_args=['D.lipid', 'D.All', 'D.Psychiatric', 'D.pain', 'D.CVD',", "Freq'] if 'vegetables' in args: mb_columns_extra += ['Tomato Freq','Cooked Tomatoes, Tomato Sauce, Tomato", "does not support dicotomize bacteria' if 'IsGenotek' in args: assert 'covars' not in", "[c for c in pheno.columns if c[:2]=='PC'] if 'lactose' in args: mb_columns +=", "u'Soda water',u'Water', u'Salt', known_args+= ffq_args known_args+= drug_args known_kwargs = ['ratio', 'threshold','taxa'] for arg", "'PNP_autosomal_clean2_nodfukim.fam'), delim_whitespace=True, index_col=0, header=None) df_related=df_fam[~df_fam.index.isin(df_fam_no_related.index)] pheno=pheno[(~pheno.index.isin(df_related.index))] if ('keep_sterile') not in args: if '16s'", "'/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/16S' glycemicStatusPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/glycemic_status.csv' def extract(*args,**kwargs): known_args = ['dic', 'all_bac', 's', 'g','f','o','c','p','otu', 'all_non_bac', 'covars', 'blood',", "'covars_noPCs', 'PCs', 'lactose','include_allPNP','IsGenotek','permute','meals','other','drugs', 'calories','bloodType','questionnaires','keep_missingCovars','activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions', 'genotek_only', 'swab_only'] ffq_args = ['activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger',", "########################FFQ END##################### #for c in pheno: print c mb_columns=list(set(mb_columns)) pheno= pheno[mb_columns] if 
'threshold'", "'Olives Freq'] if 'womenOnlyQuestions' in args: mb_columns_extra += ['Is pregnant','Is breastfeeding','Is after birth',", "'Turkey', 'Sushi', 'Brazil nuts', 'Orange', 'Rice', 'Diet Fruit Drink', 'Corn schnitzel', 'Cappuccino', 'Low", "'Vegetable Soup', 'Sausages', 'Pancake', 'Pasta', 'Sauteed vegetables', 'Plum', 'Goat Milk Yogurt', 'Orange juice',", "'Cracker', 'Chocolate Cookies', 'White beans', 'Cooked zucchini', 'Sweet potato', 'Wine', 'Cookies', 'Challah', 'Spelled',", "'Lemon juice', 'Canned Tuna Fish', 'Vegetable Salad', 'Fried eggplant', 'Salmon', 'Cashew', 'Jewish donut',", "args: mb_columns += [arg] mb_columns_extra=[val.replace(' ','_') for val in mb_columns_extra] mb_columns+=mb_columns_extra if 'meals'", "peanuts', 'Sugar', 'Smoked Salmon', 'Melon', 'Roll', 'Whipped cream', 'Coconut milk', 'Pretzels', 'Kohlrabi', 'Eggplant", "for c in pheno.columns if c[:2]=='s_' ] if 'g' in args: mb_columns +=", "0,inplace=True) mb_columns += mealsColumns ########################FFQ END##################### #for c in pheno: print c mb_columns=list(set(mb_columns))", "mb_columns = ['FID']+mb_columns ########################FFQ START##################### if 'questionnaires' in args: args=list(args)+ffq_args mb_columns_extra=[] if 'activity'", "args) and ('PCs' not in args): features_to_drop+=['PC1','PC2','PC3','PC4','PC5'] pheno=pheno.drop(features_to_drop,axis=1) if ('-9' not in args):", "replacment', 'Past breastfeeding'] if 'other' in args: #AddingIrisGlucose df_glucose = pandas.read_csv(glycemicStatusPath).set_index('RegNum') pheno =", "'Sunflower seeds', 'Coriander', 'Ciabatta', 'Tomato sauce', 'Heavy cream', 'Banana', 'Kif Kef', 'Mustard', 'Coke',", "'Edamame', 'Majadra', 'Oatmeal', 'Soy sauce', 'Strawberry', 'Pastrami', 'Lemonade', 'Pasta with tomato sauce', 'Chicken']#removed:", "+= [c for c in pheno.columns if c[:4]=='OTU_' ] if 'no_log' in args:", "'Green Tea Freq', 'Regular Tea Freq', 'Beer Freq', 'Sweet Dry Wine, Cocktails Freq',", "Cooked, 
Baked or Grilled Freq', 'Fried Fish Freq', 'Canned Tuna or Tuna Salad", "or Broccoli Freq','Sweet Potato Freq', 'Brussels Sprouts, Green or Red Cabbage Freq', 'Lettuce", "in pheno_nodic.columns if c[:2]=='s_']] pheno_g = pheno_nodic[[c for c in pheno_nodic.columns if c[:2]=='g_']]", "'Midday Hunger', 'Evening Hunger'] if 'legumes' in args: mb_columns_extra += ['Falafel in Pita", "Kubana, Cigars Freq', 'Pizza Freq'] if 'qualityOfLiving' in args: mb_columns_extra += ['Stress','Sleep quality']", "syrup Freq', 'White or Brown Sugar Freq', 'Artificial Sweeteners Freq',] if 'drinks' in", "Smoked Turkey Breast Freq', 'Turkey Meatballs, Beef, Chicken Freq', 'Shish Kebab in Pita", "quality'] if 'smoking' in args: mb_columns_extra += ['Currently smokes','Ever smoked'] if 'sweets' in", "'16s' in args: sterile_individuals = alpha_diversity_g[alpha_diversity_g < 4].index else: sterile_individuals = alpha_diversity_s[alpha_diversity_s <", "'Mutton', 'Wholemeal Noodles', 'Green Tea', 'Schnitzel', 'Brown Sugar', 'Peanuts', 'Mayonnaise', 'Persimmon', 'Apple juice',", "Salads Freq', 'Pickled Vegetables Freq', 'Olives Freq'] if 'womenOnlyQuestions' in args: mb_columns_extra +=", "'White cake', 'Apple', 'Lettuce Salad', 'Cereals', 'Yellow Cheese', 'Tea', 'Beer', 'Mozzarella Cheese', 'Fried", "args: #noSharedEnvIID=pandas.read_csv(iidsNoSharedEnv,usecols=[0],header=None,sep='\\t') #pheno=pheno[pheno.index.isin(noSharedEnvIID[0].astype(int).values)] #new code that decides which individuals to remove on the", "in args: mb_columns_extra += ['Blood A','Blood B','Blood RH-'] if 'cereals' in args: mb_columns_extra", "cut -f 1 -d '_'\"%os.path.join(rawDataPath,'tmp','dfukim.txt')) pheno =pheno[~pheno.index.isin([int(dafook) for dafook in output.split('\\n')])] if ('16s'", "presence=((pheno[mb_columns]>threshold +1e-5)&(pheno[mb_columns]!=0)).astype(int).sum() else: presence=(pheno[mb_columns]>threshold +1e-5).astype(int).sum() presence=presence[presence > 
len(presence)*ratio].index.values.tolist() pheno=pheno[other_columns+presence] if ('keep_related' not in", "else: if 'include_allPNP' in args: pheno =pandas.read_csv(pheno_fn_bacAllPNP,sep='\\t') else: pheno =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno.set_index('IID', inplace=True, drop=True)", "if 'o' in args: mb_columns += [c for c in pheno.columns if c[:2]=='o_'", "'Beet', 'Couscous', 'Beet Salad', 'Chocolate Mousse Cake', 'Sweet Roll', 'Danish', 'Coffee', 'Pasta Salad',", "'Nectarine', 'Chicken legs', 'Nuts', 'Goat Cheese', 'Jam', 'Feta Cheese', 'Mandarin', 'Pesto', 'Sugar substitute',", "df_taxa = pandas.read_csv(os.path.join(PNP_16S_DIR, taxa_level+'.txt'), sep='\\t', index_col=0) df_taxa[df_taxa<1e-3] = 1e-4 df_taxa = np.log10(df_taxa) pheno", "in meals] #Correct by total calories pheno.loc[:,mealsColumns]=pheno[mealsColumns][pheno[mealsColumns]!=-9].div(pheno['Calories_kcal_Total'].values,axis=0) pheno.replace(np.nan, 0,inplace=True) mb_columns += mealsColumns ########################FFQ", "in args: mb_columns_extra += ['Tomato Freq','Cooked Tomatoes, Tomato Sauce, Tomato Soup Freq', 'Red", "if 'otu' in args: assert '16s' in args if 'dic' in args: pheno", "if 'otu' in args: mb_columns += [c for c in pheno.columns if c[:4]=='OTU_'", "'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', 'RDW','Sodium','TSH','WBC','LDLCholesterol'] if 'glucose' in args: mb_columns += ['95P_Glucose','Glucose_Noise','Max_Glucose','Median_Glucose','WakeupGlucose', 'MeanGlucoseResponse','MeanBreadResponse','MeanBreadButterResponse'] if 'ffq' in", "Pepper Freq', 'Cucumber Freq', 'Zucchini or Eggplant Freq','Peas, Green Beans or Okra Cooked", "not in args): pheno.replace(-9, np.nan, inplace=True) if 'permute' in args: pheno=pandas.DataFrame(pheno.values[np.random.permutation(pheno.shape[0])],index=pheno.index,columns=pheno.columns) return pheno", "in args: pheno = pheno.loc[pheno['IsGenotek']==0] mb_columns = [] if 'taxa' in kwargs: 
if", "args: features_to_drop +=['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] else: features_to_drop +=['Age','Gender'] if ('include_allPNP' not in args) and ('PCs'", "if 'fid' in args: mb_columns = ['FID']+mb_columns ########################FFQ START##################### if 'questionnaires' in args:", "Freq', 'Wholemeal Crackers Freq', 'Small Burekas Freq', 'Jachnun, Mlawah, Kubana, Cigars Freq', 'Pizza", "if 'questionnaires' in args: args=list(args)+ffq_args mb_columns_extra=[] if 'activity' in args: mb_columns_extra += ['Work", "in args: mb_columns_extra += ['Falafel in Pita Bread Freq', 'Cooked Legumes Freq', 'Processed", "= ['ratio', 'threshold','taxa'] for arg in args: assert arg in known_args, 'unkown arg:", "=pandas.read_csv(pheno_fn_bacAllPNP,sep='\\t') else: pheno =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno.set_index('IID', inplace=True, drop=True) if 'include_allPNP'in args: status, output =", "carrots', 'Lemon', 'Salty Cookies', 'Beef', 'Meatballs', 'Hamburger sandwich', 'Chicken thighs', 'Granola', 'Beet', 'Couscous',", "args: mb_columns_extra += ['Is pregnant','Is breastfeeding','Is after birth', 'Taking contraceptives', 'Regular period', 'Irregular", "pheno.columns if c[:2]=='o_' ] if 'c' in args: mb_columns += [c for c", "'Fruit Pie or Cake Freq', 'Coated or Stuffed Cookies, Waffles or Biscuits Freq',", "'Natural Yogurt', 'Walnuts', 'Edamame', 'Majadra', 'Oatmeal', 'Soy sauce', 'Strawberry', 'Pastrami', 'Lemonade', 'Pasta with", "\"Only in chip:\" # print set(phenoChip.index.values)-set(phenoAll.index.values) # print len(set(phenoChip.index)-set(phenoAll.index)) print(pheno.columns.values.tolist()) print(pheno.shape) sum=0 for", "print len(set(phenoChip.index)-set(phenoAll.index)) print(pheno.columns.values.tolist()) print(pheno.shape) sum=0 for participant in pheno[['Age','Gender']].index.values: # if np.isnan(pheno.loc[participant,'Calories_kcal']) or", "kwargs: ratio=kwargs['ratio'] mb_columns = [c for c in 
pheno.columns if c[:2] in ['s_','g_','f_','o_','c_','p_']]", "= '/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/16S' glycemicStatusPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/glycemic_status.csv' def extract(*args,**kwargs): known_args = ['dic', 'all_bac', 's', 'g','f','o','c','p','otu', 'all_non_bac', 'covars',", "('covars' not in args) and ('covars_noPCs' not in args): features_to_drop += ['IsGenotek'] if", "'Saltine Crackers or Matzah Freq', 'Wholemeal Crackers Freq', 'Small Burekas Freq', 'Jachnun, Mlawah,", "Green Beans or Okra Cooked Freq', 'Cauliflower or Broccoli Freq','Sweet Potato Freq', 'Brussels", "Light Bread', 'Marble Cake', 'Brown Rice', 'Cold cut', 'Gilthead Bream', 'Garlic', 'Grapes', 'Chocolate", "pheno.columns if c[:2]=='g_' ] if 'f' in args: mb_columns += [c for c", "'Grapes or Raisins Freq', 'Banana Freq', 'Melon Freq', 'Kiwi or Strawberries Freq', 'Mango", "'Hummus', 'Chinese Chicken Noodles', 'Shakshouka', 'Tahini', 'Chicken breast', 'Steak', 'Light Bread', 'Wholemeal Crackers',", "'permute' in args: pheno=pandas.DataFrame(pheno.values[np.random.permutation(pheno.shape[0])],index=pheno.index,columns=pheno.columns) return pheno if __name__==\"__main__\": # pheno=extract('dic','covars','keep_household',\"pastry\",ratio=0.2)#'all_bac' phenoAll = extract('s','include_allPNP','covars')#'include_allPNP','keep_household','ffq','keep_related')#'include_allPNP',", "+= ['Work activity','Physical activity - mins','Physical activity - freq'] if 'activityTypesFreq' in args:", "vc df_household = pandas.read_csv(os.path.join(cleanDataPath, 'EnvironmentBlock.txt'), delim_whitespace=True) df_household = df_household[[c for c in df_household.columns", "or \\ # np.isnan(pheno.loc[participant,'Fat_g']) or \\ # np.isnan(pheno.loc[participant,'Protain_g']): # sum+=1 # print participant", "not in args, '16s and dic are mutually exclusive' if ('taxa' in list(kwargs.keys())):", "Yogurt', 'Orange juice', 'Potatoes', 'Halva', 'Yellow pepper', 
'Mango', 'Lasagna', 'Popcorn', 'Hummus Salad', 'Tilapia',", "no_log are mutually exclusive' pheno[mb_columns] = 10**pheno[mb_columns] if 'all_non_bac' in args: args=list(args)+['covars','blood','glucose','ffq','antropo'] mb_columns", "or Rye Bread Freq', 'Baguette Freq', 'Roll or Bageles Freq', 'Pita Freq', 'Saltine", "-d ' ' | cut -f 1 -d '_'\"%os.path.join(rawDataPath,'tmp','dfukim.txt')) pheno =pheno[~pheno.index.isin([int(dafook) for dafook", "mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', 'RDW','Sodium','TSH','WBC','LDLCholesterol'] if 'glucose' in args: mb_columns += ['95P_Glucose','Glucose_Noise','Max_Glucose','Median_Glucose','WakeupGlucose',", "# print set(phenoChip.index.values)-set(phenoAll.index.values) # print len(set(phenoChip.index)-set(phenoAll.index)) print(pheno.columns.values.tolist()) print(pheno.shape) sum=0 for participant in pheno[['Age','Gender']].index.values:", "mb_columns += ['BMI','BPdia','BPsys','HeartRate','Height','Hips','WHR','Waist'] if 's_stats_pheno' in args: s_stats=['BMI','Cholesterol,total','WakeupGlucose','Albumin','Creatinine','HbA1C%','Height','Hips','Waist','WHR','HDLCholesterol'] #, 'Triglycerides', 'LDLCholesterol'] mb_columns+=s_stats mb_columns=list(set(mb_columns))", "Organs Freq', 'Fish Cooked, Baked or Grilled Freq', 'Fried Fish Freq', 'Canned Tuna", "if ('-9' not in args): pheno.replace(-9, np.nan, inplace=True) if 'permute' in args: pheno=pandas.DataFrame(pheno.values[np.random.permutation(pheno.shape[0])],index=pheno.index,columns=pheno.columns)", "'Entrecote', 'Cottage cheese', 'Oil', 'Natural Yogurt', 'Walnuts', 'Edamame', 'Majadra', 'Oatmeal', 'Soy sauce', 'Strawberry',", "'LDLCholesterol'] mb_columns+=s_stats mb_columns=list(set(mb_columns)) if 'fid' in args: 
mb_columns = ['FID']+mb_columns ########################FFQ START##################### if", "Freq', 'Ice Cream or Popsicle which contains Dairy Freq', 'Popsicle Without Dairy Freq',", "in ['c_', 'g_', 'o_', 's_', 'k_', 'p_', 'f_']): print c alpha_diversity_s = (pheno_s>pheno_s.min().min()).sum(axis=1)", "dates', 'Carrot Cake', 'Doritos', 'Israeli couscous', 'Pistachio', 'Date honey', 'Vinaigrette', 'Bamba', 'Dark Chocolate',", "= Bed(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated'), count_A1=True)#.read() df_fam_no_related = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated.fam'), delim_whitespace=True, index_col=0, header=None) df_fam =", "we remove him\" features_to_drop=[] if ('IsGenotek' not in args) and ('covars' not in", "['Alcoholic_Drinks_Freq','Cigarretes_per_day','Coffee_Freq','Start_smoking_age'] if 'antropo' in args: mb_columns += ['BMI','BPdia','BPsys','HeartRate','Height','Hips','WHR','Waist'] if 's_stats_pheno' in args: s_stats=['BMI','Cholesterol,total','WakeupGlucose','Albumin','Creatinine','HbA1C%','Height','Hips','Waist','WHR','HDLCholesterol']", "'Fried Fish Freq', 'Canned Tuna or Tuna Salad Freq', 'Fish (not Tuna) Pickled,", "Donut Freq', 'Cake, Torte Cakes, Chocolate Cake Freq', 'Fruit Pie or Cake Freq',", "'Dried Fruits Freq', 'Fruit Salad Freq'] if 'hunger' in args: mb_columns_extra += ['General", "Freq', 'Apricot Fresh or Dry, or Loquat Freq', 'Grapes or Raisins Freq', 'Banana", "'Corn Freq', 'Parsley, Celery, Fennel, Dill, Cilantro, Green Onion Freq', 'Fresh Vegetable Salad", "or Cooked, Carrot Juice Freq', 'Corn Freq', 'Parsley, Celery, Fennel, Dill, Cilantro, Green", "'Ice cream', 'Cream Cake', 'Green cabbage', 'Olives', 'Balsamic vinegar', 'Peach', 'Light Yellow Cheese',", "'Chicken Meatballs', 'Burekas', 'Carrots', 'Tofu', 'Wholemeal Pita', 'Sunflower seeds', 'Coriander', 'Ciabatta', 'Tomato sauce',", "'Biscuit', 'Potato chips', 'White Cheese', 'French fries', 'Wholemeal Bread', 
'Tuna Salad', 'Chocolate spread',", "' ' | cut -f 1 -d '_'\"%os.path.join(rawDataPath,'tmp','dfukim.txt')) pheno =pheno[~pheno.index.isin([int(dafook) for dafook in", "'Balsamic vinegar', 'Peach', 'Light Yellow Cheese', 'Red pepper', 'Bagel', 'Entrecote', 'Cottage cheese', 'Oil',", "'Bagel', 'Entrecote', 'Cottage cheese', 'Oil', 'Natural Yogurt', 'Walnuts', 'Edamame', 'Majadra', 'Oatmeal', 'Soy sauce',", "'White Cheese', 'French fries', 'Wholemeal Bread', 'Tuna Salad', 'Chocolate spread', 'Kebab', 'Rice crackers',", "kwarg in list(kwargs.keys()): assert kwarg in known_kwargs, 'unkown kwarg: %s'%(kwarg) if ('16s' in", "args: mb_columns_extra += ['C-Section','Home delivery','Was breastfed'] if 'dressSweetners' in args: mb_columns_extra += ['Oil", "else: threshold=kwargs['threshold'] if 'ratio' in kwargs: ratio=kwargs['ratio'] mb_columns = [c for c in", "Tuna Fish', 'Vegetable Salad', 'Fried eggplant', 'Salmon', 'Cashew', 'Jewish donut', 'Rugelach', 'Cake', 'Ravioli',", "pheno.loc[pheno['IsGenotek']==0] mb_columns = [] if 'taxa' in kwargs: if kwargs['taxa'][0]=='*': kwargs['taxa']=[initial+kwargs['taxa'][1:] for initial", "'Corn schnitzel', 'Cappuccino', 'Low fat Milk', 'Pickled cucumber', 'Soymilk', 'Dates', 'Croissant', 'Biscuit', 'Potato", "list(kwargs.keys())): assert len(set(['all_bac','s', 'g','f','o','c','p','otu']).intersection(set(args)))==0, \\ 'taxa is mutual exclusive with all_bac,s,g,f,o,c,p,otu' if 'include_allPNP'", "'Pesek Zman snack', 'Green onions', 'Mushrooms', 'Lemon juice', 'Canned Tuna Fish', 'Vegetable Salad',", "Tomato Sauce, Tomato Soup Freq', 'Red Pepper Freq', 'Green Pepper Freq', 'Cucumber Freq',", "if 'activity' in args: mb_columns_extra += ['Work activity','Physical activity - mins','Physical activity -", "Juice Freq', 'Juice Freq', 'Diet Soda Freq', 'Regular Sodas with Sugar Freq', 'Decaffeinated", "mutually exclusive' if ('taxa' in list(kwargs.keys())): assert len(set(['all_bac','s', 
'g','f','o','c','p','otu']).intersection(set(args)))==0, \\ 'taxa is mutual", "Cheese', 'Chicken soup', 'Wholemeal Roll', 'Canned corn', 'Salty Cheese', 'Melawach', 'White cake', 'Apple',", "+= mealsColumns ########################FFQ END##################### #for c in pheno: print c mb_columns=list(set(mb_columns)) pheno= pheno[mb_columns]", "have no 'Age','Gender' # if set(['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'])<=set(pheno.columns.values): keep_inds=pheno.loc[:,['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek']].replace(-9, np.nan).dropna().index.values beforeNumParticpants=pheno.shape[0] pheno=pheno.loc[keep_inds] afterNumParticpants=pheno.shape[0] if beforeNumParticpants-afterNumParticpants>0:", "= pheno.loc[pheno['IsGenotek']==0] mb_columns = [] if 'taxa' in kwargs: if kwargs['taxa'][0]=='*': kwargs['taxa']=[initial+kwargs['taxa'][1:] for", "not in args, 'include_allPNP does not support dicotomize bacteria' if 'IsGenotek' in args:", "pheno= pheno[mb_columns] if 'threshold' not in kwargs: threshold = -4 else: threshold=kwargs['threshold'] if", "Freq', 'Turkey Meatballs, Beef, Chicken Freq', 'Shish Kebab in Pita Bread Freq', 'Falafel", "'Small Burekas Freq', 'Jachnun, Mlawah, Kubana, Cigars Freq', 'Pizza Freq'] if 'qualityOfLiving' in", "juice', 'Potatoes', 'Halva', 'Yellow pepper', 'Mango', 'Lasagna', 'Popcorn', 'Hummus Salad', 'Tilapia', 'Pizza', 'Fried", "and ('other' not in args): if 'calories' not in args: features_to_drop +=['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] else:", "'lactose','include_allPNP','IsGenotek','permute','meals','other','drugs', 'calories','bloodType','questionnaires','keep_missingCovars','activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions', 'genotek_only', 'swab_only'] ffq_args = 
['activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions']", "= np.log10(df_taxa) pheno = pheno.merge(df_taxa, left_index=True, right_index=True) pheno_s = pheno[[c for c in", "Freq', 'Chicken or Turkey With Skin Freq', 'Chicken or Turkey Without Skin Freq',", "taxa_level+'.txt'), sep='\\t', index_col=0) df_taxa[df_taxa<1e-3] = 1e-4 df_taxa = np.log10(df_taxa) pheno = pheno.merge(df_taxa, left_index=True,", "'Coriander', 'Ciabatta', 'Tomato sauce', 'Heavy cream', 'Banana', 'Kif Kef', 'Mustard', 'Coke', 'Vegetable Soup',", "Freq', 'Orange or Grapefruit Freq', 'Orange or Grapefruit Juice Freq', 'Apple Freq', 'Apricot", "Pie or Cake Freq', 'Coated or Stuffed Cookies, Waffles or Biscuits Freq', 'Simple", "np.log10(df_taxa) pheno = pheno.merge(df_taxa, left_index=True, right_index=True) pheno_s = pheno[[c for c in pheno.columns", "'o_', 's_', 'k_', 'p_', 'f_']): print c alpha_diversity_s = (pheno_s>pheno_s.min().min()).sum(axis=1) alpha_diversity_g = (pheno_g>pheno_g.min().min()).sum(axis=1)", "Brown Sugar Freq', 'Artificial Sweeteners Freq',] if 'drinks' in args: mb_columns_extra += ['Nectar,", "'Granola', 'Beet', 'Couscous', 'Beet Salad', 'Chocolate Mousse Cake', 'Sweet Roll', 'Danish', 'Coffee', 'Pasta", "('IsGenotek' not in args) and ('covars' not in args) and ('covars_noPCs' not in", "'Honey, Jam, fruit syrup, Maple syrup Freq', 'White or Brown Sugar Freq', 'Artificial", "if 'meals' in args: mealsColumns=[val.replace(' ','_') for val in meals] #Correct by total", "Sauce, Tomato Soup Freq', 'Red Pepper Freq', 'Green Pepper Freq', 'Cucumber Freq', 'Zucchini", "#print beforeNumParticpants-afterNumParticpants #print \"805175 has no FFQ!!!!! 
that is why we remove him\"", "c in df_household.columns if int(c) in pheno.index]] df_household = df_household[df_household.index.isin(pheno.index)] remove_inds = df_household.index[vc.VertexCut().work(df_household.values,", "drop=True) pheno_s = pheno_nodic[[c for c in pheno_nodic.columns if c[:2]=='s_']] pheno_g = pheno_nodic[[c", "'Ciabatta', 'Tomato sauce', 'Heavy cream', 'Banana', 'Kif Kef', 'Mustard', 'Coke', 'Vegetable Soup', 'Sausages',", "[arg] mb_columns_extra=[val.replace(' ','_') for val in mb_columns_extra] mb_columns+=mb_columns_extra if 'meals' in args: mealsColumns=[val.replace('", "else: pheno =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno.set_index('IID', inplace=True, drop=True) if 'include_allPNP'in args: status, output = subprocess.getstatusoutput(\"cut", "'Falafel', 'Noodles', 'Jachnun', 'Turkey', 'Sushi', 'Brazil nuts', 'Orange', 'Rice', 'Diet Fruit Drink', 'Corn", "Cake', 'Green cabbage', 'Olives', 'Balsamic vinegar', 'Peach', 'Light Yellow Cheese', 'Red pepper', 'Bagel',", "'drugs' in args: mb_columns+=drug_args else: for arg in drug_args: if arg in args:", "contraceptives', 'Regular period', 'Irregular period', 'No period','Hormonal replacment', 'Past breastfeeding'] if 'other' in", "Freq', 'Internal Organs Freq', 'Fish Cooked, Baked or Grilled Freq', 'Fried Fish Freq',", "#for c in pheno: print c mb_columns=list(set(mb_columns)) pheno= pheno[mb_columns] if 'threshold' not in", "beforeNumParticpants=pheno.shape[0] pheno=pheno.loc[keep_inds] afterNumParticpants=pheno.shape[0] if beforeNumParticpants-afterNumParticpants>0: pass #print \"Removing participants with missing covars!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\" #print", "'Potatoes Boiled, Baked, Mashed, Potatoes Salad Freq', 'Fries Freq', 'Pasta or Flakes Freq']", "= pandas.read_csv(os.path.join(cleanDataPath, 'EnvironmentBlock.txt'), delim_whitespace=True) df_household = df_household[[c for c in df_household.columns if int(c)", "'Salty Cheese', 'Melawach', 'White cake', 'Apple', 
'Lettuce Salad', 'Cereals', 'Yellow Cheese', 'Tea', 'Beer',", "'order', 'class', 'phylum']: df_taxa = pandas.read_csv(os.path.join(PNP_16S_DIR, taxa_level+'.txt'), sep='\\t', index_col=0) df_taxa[df_taxa<1e-3] = 1e-4 df_taxa", "in args): assert 'dic' not in args, '16s and dic are mutually exclusive'", "in args: assert 'dic' not in args, 'dic and no_log are mutually exclusive'", "Freq', 'Mixed Meat Dishes as Moussaka, Hamin, Cuba Freq', 'Mixed Chicken or Turkey", "-f 1 %s -d ' ' | cut -f 1 -d '_'\"%os.path.join(rawDataPath,'tmp','dfukim.txt')) pheno", "'Regular Tea Freq', 'Beer Freq', 'Sweet Dry Wine, Cocktails Freq', 'Alcoholic Drinks Freq']", "Mashed, Potatoes Salad Freq', 'Fries Freq', 'Pasta or Flakes Freq'] if 'delivery' in", "'Type 2 activity - freq','T3Activity kind','Type 3 activity - freq'] if 'bloodType' in", "'Cappuccino', 'Low fat Milk', 'Pickled cucumber', 'Soymilk', 'Dates', 'Croissant', 'Biscuit', 'Potato chips', 'White", "+= [c for c in pheno.columns if c[:2]=='o_' ] if 'c' in args:", "'threshold' not in kwargs: threshold = -4 else: threshold=kwargs['threshold'] if 'ratio' in kwargs:", "Freq', 'Alcoholic Drinks Freq'] if 'fruits' in args: mb_columns_extra += ['Mandarin or Clementine", "args: if '16s' in args: sterile_individuals = alpha_diversity_g[alpha_diversity_g < 4].index else: sterile_individuals =", "pheno.replace(-9, np.nan, inplace=True) if 'permute' in args: pheno=pandas.DataFrame(pheno.values[np.random.permutation(pheno.shape[0])],index=pheno.index,columns=pheno.columns) return pheno if __name__==\"__main__\": #", "B','Blood RH-'] if 'cereals' in args: mb_columns_extra += ['Cornflakes Freq','Granola or Bernflaks Freq','Cooked", "'Israeli couscous', 'Pistachio', 'Date honey', 'Vinaigrette', 'Bamba', 'Dark Chocolate', 'Turkey Shawarma', 'Olive oil',", "= extract('s','include_allPNP','covars')#'include_allPNP','keep_household','ffq','keep_related')#'include_allPNP', print(phenoAll.shape) print(phenoAll.columns) phenoAll = 
extract('s','include_allPNP') print(phenoAll.shape) print(phenoAll.columns) phenoChip = extract('keep_household','s','keep_related') print(phenoChip.shape)", "4].index else: sterile_individuals = alpha_diversity_s[alpha_diversity_s < 15].index pheno=pheno[~pheno.index.isin(sterile_individuals)] if 'keep_household' not in args:", "Cheese', 'Red pepper', 'Bagel', 'Entrecote', 'Cottage cheese', 'Oil', 'Natural Yogurt', 'Walnuts', 'Edamame', 'Majadra',", "'Oatmeal', 'Soy sauce', 'Strawberry', 'Pastrami', 'Lemonade', 'Pasta with tomato sauce', 'Chicken']#removed: u'Soda water',u'Water',", "+= ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', # 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', # 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', # 'RDW','Sodium','TSH','WBC','AlkalinePhosphatase','GGT','LDH','Iron','LDLCholesterol','Magnesium', # 'Triglycerides','TotalProtein','TotalBilirubin','Urea'] mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine',", "'Thousand Island Dressing, Garlic Dressing Freq', 'Honey, Jam, fruit syrup, Maple syrup Freq',", "+=['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] else: features_to_drop +=['Age','Gender'] if ('include_allPNP' not in args) and ('PCs' not in", "pysnptools.snpreader.bed import Bed import subprocess cleanDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/cleanData/' rawDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/rawData' pheno_fn_bac =os.path.join(cleanDataPath,'noMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') #pheno_fn_bac =os.path.join(cleanDataPath,'allChipPhenotypes_nodfukimWith5PCair.phenotypes') pheno_fn_bacDic=os.path.join(cleanDataPath,'dicNoMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') 
pheno_fn_bacAllPNP=os.path.join(rawDataPath,'allPNPPhenotypes.phenotypes')", "pistachios Freq','Peanuts Freq'] if 'vegetables' in args: mb_columns_extra += ['Tomato Freq','Cooked Tomatoes, Tomato", "Cabbage Freq', 'Lettuce Freq','Carrots, Fresh or Cooked, Carrot Juice Freq', 'Corn Freq', 'Parsley,", "'Boiled corn', 'Chicken drumstick', 'Pita', 'Pasta Bolognese', 'Chicken Meatballs', 'Burekas', 'Carrots', 'Tofu', 'Wholemeal", "mb_columns_extra += ['Oil as an addition for Salads or Stews Freq','Mayonnaise Including Light", "if beforeNumParticpants-afterNumParticpants>0: pass #print \"Removing participants with missing covars!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\" #print beforeNumParticpants-afterNumParticpants #print \"805175", "'qualityOfLiving' in args: mb_columns_extra += ['Stress','Sleep quality'] if 'smoking' in args: mb_columns_extra +=", "df_household.index[vc.VertexCut().work(df_household.values, 0.5)] pheno=pheno[~pheno.index.isin(remove_inds)] if 'keep_missingCovars' not in args: #One participant 244624 has no", "'Cucumber Freq', 'Zucchini or Eggplant Freq','Peas, Green Beans or Okra Cooked Freq', 'Cauliflower", "or Grapefruit Freq', 'Orange or Grapefruit Juice Freq', 'Apple Freq', 'Apricot Fresh or", "pheno_nodic.columns if c[:2]=='g_']] else: if 'include_allPNP' in args: pheno =pandas.read_csv(pheno_fn_bacAllPNP,sep='\\t') else: pheno =pandas.read_csv(pheno_fn_bac,sep='\\t')", "+= ['Falafel in Pita Bread Freq', 'Cooked Legumes Freq', 'Processed Meat Free Products", "Popsicle which contains Dairy Freq', 'Popsicle Without Dairy Freq', 'Black or White Grains,", "mb_columns += [c for c in pheno.columns if c[:2]=='f_' ] if 'o' in", "Freq'] if 'meatProducts' in args: mb_columns_extra += ['Egg Recipes Freq', 'Egg, Hard Boiled", "args: mb_columns+=drug_args else: for arg in drug_args: if arg in args: mb_columns +=", "meals=['Vodka or Arak', 'Avocado', 'Parsley', 'Coated peanuts', 'Sugar', 'Smoked Salmon', 'Melon', 'Roll', 'Whipped", "c in pheno if c[:2] not in 
('s_', 'g_', 'f_', 'o_', 'c_', 'p_')]]", "contains Dairy Freq', 'Popsicle Without Dairy Freq', 'Black or White Grains, Watermelon Seeds", "'Pastrami', 'Lemonade', 'Pasta with tomato sauce', 'Chicken']#removed: u'Soda water',u'Water', u'Salt', known_args+= ffq_args known_args+=", "if ('covars' not in args) and ('covars_noPCs' not in args) and ('other' not", "Freq',] if 'drinks' in args: mb_columns_extra += ['Nectar, Cider Freq', 'Diet Juice Freq',", "'Cake, Torte Cakes, Chocolate Cake Freq', 'Fruit Pie or Cake Freq', 'Coated or", "= pheno_nodic[[c for c in pheno_nodic.columns if c[:2]=='g_']] else: if 'include_allPNP' in args:", "args): #bed = Bed(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated'), count_A1=True)#.read() df_fam_no_related = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated.fam'), delim_whitespace=True, index_col=0, header=None)", "Freq','Lemon Freq', 'Onion Freq', 'Garlic Freq', 'Vegetable Soup Freq', 'Hummus Salad Freq', 'Tahini", "args if 'dic' in args: pheno =pandas.read_csv(pheno_fn_bacDic,sep='\\t') pheno.set_index('IID', inplace=True, drop=True) pheno_nodic =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno_nodic.set_index('IID',", "sys from pysnptools.snpreader.bed import Bed import subprocess cleanDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/cleanData/' rawDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/rawData' pheno_fn_bac =os.path.join(cleanDataPath,'noMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') #pheno_fn_bac =os.path.join(cleanDataPath,'allChipPhenotypes_nodfukimWith5PCair.phenotypes')", "Freq', 'Decaffeinated Coffee Freq', 'Coffee Freq', 'Herbal Tea Freq', 'Green Tea Freq', 'Regular", "decides which individuals to remove on the fly import ForPaper.VertexCut as vc df_household", "green beans', 'Cooked mushrooms', 'Watermelon', 'Grilled cheese', 'Bissli', 'Pullet', 'Hummus', 'Chinese Chicken Noodles',", "Grapefruit Freq', 'Orange or 
Grapefruit Juice Freq', 'Apple Freq', 'Apricot Fresh or Dry,", "'keep_missingCovars' not in args: #One participant 244624 has no 'Calories_kcal','Carbs_g','Fat_g','Protain_g' #3 participant 86356,762339,805175", "'Processed Meat Free Products Freq'] if 'meatProducts' in args: mb_columns_extra += ['Egg Recipes", "Dill, Cilantro, Green Onion Freq', 'Fresh Vegetable Salad Without Dressing or Oil Freq',", "'dic' in args: pheno =pandas.read_csv(pheno_fn_bacDic,sep='\\t') pheno.set_index('IID', inplace=True, drop=True) pheno_nodic =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno_nodic.set_index('IID', inplace=True, drop=True)", "Freq'] if 'delivery' in args: mb_columns_extra += ['C-Section','Home delivery','Was breastfed'] if 'dressSweetners' in", "Eggplant Freq','Peas, Green Beans or Okra Cooked Freq', 'Cauliflower or Broccoli Freq','Sweet Potato", "__name__==\"__main__\": # pheno=extract('dic','covars','keep_household',\"pastry\",ratio=0.2)#'all_bac' phenoAll = extract('s','include_allPNP','covars')#'include_allPNP','keep_household','ffq','keep_related')#'include_allPNP', print(phenoAll.shape) print(phenoAll.columns) phenoAll = extract('s','include_allPNP') print(phenoAll.shape) print(phenoAll.columns)", "'Age','Gender' # if set(['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'])<=set(pheno.columns.values): keep_inds=pheno.loc[:,['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek']].replace(-9, np.nan).dropna().index.values beforeNumParticpants=pheno.shape[0] pheno=pheno.loc[keep_inds] afterNumParticpants=pheno.shape[0] if beforeNumParticpants-afterNumParticpants>0: pass #print", "if 'all_bac' in args: args=list(args)+['s','g','f','o','c','p'] if 's' in args: mb_columns += [c for", "Freq', 'Falafel in Pita version 2 Freq','Processed Meat Products Freq','Beef, Veal, Lamb, Pork,", "Red Cabbage Freq', 'Lettuce Freq','Carrots, Fresh or Cooked, Carrot Juice Freq', 'Corn Freq',", "in args: mb_columns += [c for c in pheno.columns if 
c[:2]=='c_' ] if", "mb_columns_extra += ['T1Activity kind','Type 1 activity - freq','T2Activity kind', 'Type 2 activity -", "if c[:4]=='OTU_' ] if 'no_log' in args: assert 'dic' not in args, 'dic", "+= ['95P_Glucose','Glucose_Noise','Max_Glucose','Median_Glucose','WakeupGlucose', 'MeanGlucoseResponse','MeanBreadResponse','MeanBreadButterResponse'] if 'ffq' in args: mb_columns += ['Alcoholic_Drinks_Freq','Cigarretes_per_day','Coffee_Freq','Start_smoking_age'] if 'antropo' in", "drumstick', 'Pita', 'Pasta Bolognese', 'Chicken Meatballs', 'Burekas', 'Carrots', 'Tofu', 'Wholemeal Pita', 'Sunflower seeds',", "assert kwarg in known_kwargs, 'unkown kwarg: %s'%(kwarg) if ('16s' in args): assert 'dic'", "Freq', 'Corn Freq', 'Parsley, Celery, Fennel, Dill, Cilantro, Green Onion Freq', 'Fresh Vegetable", "if '16s' in args: sterile_individuals = alpha_diversity_g[alpha_diversity_g < 4].index else: sterile_individuals = alpha_diversity_s[alpha_diversity_s", "in args: mb_columns += [c for c in pheno.columns if c[:4]=='OTU_' ] if", "pheno=extract('dic','covars','keep_household',\"pastry\",ratio=0.2)#'all_bac' phenoAll = extract('s','include_allPNP','covars')#'include_allPNP','keep_household','ffq','keep_related')#'include_allPNP', print(phenoAll.shape) print(phenoAll.columns) phenoAll = extract('s','include_allPNP') print(phenoAll.shape) print(phenoAll.columns) phenoChip =", "features_to_drop=[] if ('IsGenotek' not in args) and ('covars' not in args) and ('covars_noPCs'", "'cereals' in args: mb_columns_extra += ['Cornflakes Freq','Granola or Bernflaks Freq','Cooked Cereal such as", "'Peanuts', 'Mayonnaise', 'Persimmon', 'Apple juice', 'Stuffed Peppers', 'Egg', 'Pear', 'Peas', 'Pecan', 'Cooked cauliflower',", "beforeNumParticpants-afterNumParticpants #print \"805175 has no FFQ!!!!! 
that is why we remove him\" features_to_drop=[]", "Cake Freq', 'Coated or Stuffed Cookies, Waffles or Biscuits Freq', 'Simple Cookies or", "pheno[['Age','Gender']].index.values: # if np.isnan(pheno.loc[participant,'Calories_kcal']) or \\ # np.isnan(pheno.loc[participant,'Carbs_g']) or \\ # np.isnan(pheno.loc[participant,'Fat_g']) or", "'Strawberry', 'Pastrami', 'Lemonade', 'Pasta with tomato sauce', 'Chicken']#removed: u'Soda water',u'Water', u'Salt', known_args+= ffq_args", "] if 'f' in args: mb_columns += [c for c in pheno.columns if", "'Salmon', 'Cashew', 'Jewish donut', 'Rugelach', 'Cake', 'Ravioli', 'Tomatoes', 'Wholemeal Light Bread', 'Marble Cake',", "['Mandarin or Clementine Freq', 'Orange or Grapefruit Freq', 'Orange or Grapefruit Juice Freq',", "END##################### #for c in pheno: print c mb_columns=list(set(mb_columns)) pheno= pheno[mb_columns] if 'threshold' not", "Cookies, Waffles or Biscuits Freq', 'Simple Cookies or Biscuits Freq', 'Ice Cream or", "Freq', 'Green Tea Freq', 'Regular Tea Freq', 'Beer Freq', 'Sweet Dry Wine, Cocktails", "ffq_args known_args+= drug_args known_kwargs = ['ratio', 'threshold','taxa'] for arg in args: assert arg", "if ('16s' in args): pheno = pheno[[c for c in pheno if c[:2]", "df_related=df_fam[~df_fam.index.isin(df_fam_no_related.index)] pheno=pheno[(~pheno.index.isin(df_related.index))] if ('keep_sterile') not in args: if '16s' in args: sterile_individuals =", "in args: pheno = pheno.loc[pheno['IsGenotek']==1] if 'swab_only' in args: pheno = pheno.loc[pheno['IsGenotek']==0] mb_columns", "'Baguette Freq', 'Roll or Bageles Freq', 'Pita Freq', 'Saltine Crackers or Matzah Freq',", "Cookies as Rogallach, Croissant or Donut Freq', 'Cake, Torte Cakes, Chocolate Cake Freq',", "'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', # 'RDW','Sodium','TSH','WBC','AlkalinePhosphatase','GGT','LDH','Iron','LDLCholesterol','Magnesium', # 
'Triglycerides','TotalProtein','TotalBilirubin','Urea'] mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', 'RDW','Sodium','TSH','WBC','LDLCholesterol'] if 'glucose'", "in pheno: ### if (c[:2] not in ['c_', 'g_', 'o_', 's_', 'k_', 'p_',", "'Diet Juice Freq', 'Juice Freq', 'Diet Soda Freq', 'Regular Sodas with Sugar Freq',", "len(set(phenoChip.index)-set(phenoAll.index)) print(pheno.columns.values.tolist()) print(pheno.shape) sum=0 for participant in pheno[['Age','Gender']].index.values: # if np.isnan(pheno.loc[participant,'Calories_kcal']) or \\", "Coke', 'Dried dates', 'Carrot Cake', 'Doritos', 'Israeli couscous', 'Pistachio', 'Date honey', 'Vinaigrette', 'Bamba',", "Bread', 'Tuna Salad', 'Chocolate spread', 'Kebab', 'Rice crackers', 'Wafers', 'Lettuce', 'Rice Noodles', 'Lentils',", "pheno = pheno.loc[pheno['IsGenotek']==1] if 'swab_only' in args: pheno = pheno.loc[pheno['IsGenotek']==0] mb_columns = []", "mb_columns += [c for c in pheno.columns if c[:4]=='OTU_' ] if 'no_log' in", "['c_', 'g_', 'o_', 's_', 'k_', 'p_', 'f_']): print c alpha_diversity_s = (pheno_s>pheno_s.min().min()).sum(axis=1) alpha_diversity_g", "Freq', 'Roll or Bageles Freq', 'Pita Freq', 'Saltine Crackers or Matzah Freq', 'Wholemeal", "pheno=pheno[~pheno.index.isin(remove_inds)] if 'keep_missingCovars' not in args: #One participant 244624 has no 'Calories_kcal','Carbs_g','Fat_g','Protain_g' #3", "threshold = -4 else: threshold=kwargs['threshold'] if 'ratio' in kwargs: ratio=kwargs['ratio'] mb_columns = [c", "'Chocolate spread', 'Kebab', 'Rice crackers', 'Wafers', 'Lettuce', 'Rice Noodles', 'Lentils', 'Mutton', 'Wholemeal Noodles',", "if 'p' in args: mb_columns += [c for c in pheno.columns if c[:2]=='p_'", "if 'swab_only' in args: pheno = 
pheno.loc[pheno['IsGenotek']==0] mb_columns = [] if 'taxa' in", "pheno.columns if c[:2]=='p_' ] if 'otu' in args: mb_columns += [c for c", "+= taxadf.columns.values.tolist() if 'all_bac' in args: args=list(args)+['s','g','f','o','c','p'] if 's' in args: mb_columns +=", "pheno.columns if c[:2]=='s_' ] if 'g' in args: mb_columns += [c for c", "('PCs' not in args): features_to_drop+=['PC1','PC2','PC3','PC4','PC5'] pheno=pheno.drop(features_to_drop,axis=1) if ('-9' not in args): pheno.replace(-9, np.nan,", "in args, 'include_allPNP does not support dicotomize bacteria' if 'IsGenotek' in args: assert", "'Cooked carrots', 'Lemon', 'Salty Cookies', 'Beef', 'Meatballs', 'Hamburger sandwich', 'Chicken thighs', 'Granola', 'Beet',", "'EnvironmentBlock.txt'), delim_whitespace=True) df_household = df_household[[c for c in df_household.columns if int(c) in pheno.index]]", "chips', 'White Cheese', 'French fries', 'Wholemeal Bread', 'Tuna Salad', 'Chocolate spread', 'Kebab', 'Rice", "on the fly import ForPaper.VertexCut as vc df_household = pandas.read_csv(os.path.join(cleanDataPath, 'EnvironmentBlock.txt'), delim_whitespace=True) df_household", "smokes','Ever smoked'] if 'sweets' in args: mb_columns_extra += ['Milk or Dark Chocolate Freq',", "'f' in args: mb_columns += [c for c in pheno.columns if c[:2]=='f_' ]", "Oil Freq', 'Fresh Vegetable Salad With Dressing or Oil Freq', 'Avocado Freq','Lemon Freq',", "'blood' in args: # mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', # 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', # 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', # 'RDW','Sodium','TSH','WBC','AlkalinePhosphatase','GGT','LDH','Iron','LDLCholesterol','Magnesium',", "if 'legumes' in args: mb_columns_extra += ['Falafel in Pita Bread Freq', 'Cooked Legumes", "Pita', 'Sunflower seeds', 'Coriander', 'Ciabatta', 'Tomato 
sauce', 'Heavy cream', 'Banana', 'Kif Kef', 'Mustard',", "if 'include_allPNP' in args: assert 'dic' not in args, 'include_allPNP does not support", "and ('covars_noPCs' not in args) and ('other' not in args): if 'calories' not", "if 'dressSweetners' in args: mb_columns_extra += ['Oil as an addition for Salads or", "'Dates', 'Croissant', 'Biscuit', 'Potato chips', 'White Cheese', 'French fries', 'Wholemeal Bread', 'Tuna Salad',", "[] if 'taxa' in kwargs: if kwargs['taxa'][0]=='*': kwargs['taxa']=[initial+kwargs['taxa'][1:] for initial in ('s_', 'g_',", "Vegetable Salad With Dressing or Oil Freq', 'Avocado Freq','Lemon Freq', 'Onion Freq', 'Garlic", "'Burekas', 'Carrots', 'Tofu', 'Wholemeal Pita', 'Sunflower seeds', 'Coriander', 'Ciabatta', 'Tomato sauce', 'Heavy cream',", "Freq', 'Peach, Nectarine, Plum Freq', 'Pear Fresh, Cooked or Canned Freq','Persimmon Freq', 'Watermelon", "in args) and ('other' not in args): if 'calories' not in args: features_to_drop", "print set(phenoChip.index.values)-set(phenoAll.index.values) # print len(set(phenoChip.index)-set(phenoAll.index)) print(pheno.columns.values.tolist()) print(pheno.shape) sum=0 for participant in pheno[['Age','Gender']].index.values: #", "index_col=0, header=None) df_related=df_fam[~df_fam.index.isin(df_fam_no_related.index)] pheno=pheno[(~pheno.index.isin(df_related.index))] if ('keep_sterile') not in args: if '16s' in args:", "'Watermelon', 'Grilled cheese', 'Bissli', 'Pullet', 'Hummus', 'Chinese Chicken Noodles', 'Shakshouka', 'Tahini', 'Chicken breast',", "'Orange juice', 'Potatoes', 'Halva', 'Yellow pepper', 'Mango', 'Lasagna', 'Popcorn', 'Hummus Salad', 'Tilapia', 'Pizza',", "'Cake', 'Ravioli', 'Tomatoes', 'Wholemeal Light Bread', 'Marble Cake', 'Brown Rice', 'Cold cut', 'Gilthead", "sauce', 'Chicken']#removed: u'Soda water',u'Water', u'Salt', known_args+= ffq_args known_args+= drug_args known_kwargs = ['ratio', 'threshold','taxa']", "in pheno: print c mb_columns=list(set(mb_columns)) pheno= 
pheno[mb_columns] if 'threshold' not in kwargs: threshold", "('PCs') in args: mb_columns += [c for c in pheno.columns if c[:2]=='PC'] if", "'species', 'genus', 'family', 'order', 'class', 'phylum']: df_taxa = pandas.read_csv(os.path.join(PNP_16S_DIR, taxa_level+'.txt'), sep='\\t', index_col=0) df_taxa[df_taxa<1e-3]", "or Oil Freq', 'Avocado Freq','Lemon Freq', 'Onion Freq', 'Garlic Freq', 'Vegetable Soup Freq',", "'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions', 'genotek_only', 'swab_only'] ffq_args = ['activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions'] drug_args=['D.lipid', 'D.All',", "in pheno.index]] df_household = df_household[df_household.index.isin(pheno.index)] remove_inds = df_household.index[vc.VertexCut().work(df_household.values, 0.5)] pheno=pheno[~pheno.index.isin(remove_inds)] if 'keep_missingCovars' not", "delim_whitespace=True) df_household = df_household[[c for c in df_household.columns if int(c) in pheno.index]] df_household", "args) and ('covars_noPCs' not in args): features_to_drop += ['IsGenotek'] if ('covars' not in", "set(phenoChip.index.values)-set(phenoAll.index.values) # print len(set(phenoChip.index)-set(phenoAll.index)) print(pheno.columns.values.tolist()) print(pheno.shape) sum=0 for participant in pheno[['Age','Gender']].index.values: # if", "244624 has no 'Calories_kcal','Carbs_g','Fat_g','Protain_g' #3 participant 86356,762339,805175 have no 'Age','Gender' # if set(['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'])<=set(pheno.columns.values):", "in args: pheno =pandas.read_csv(pheno_fn_bacAllPNP,sep='\\t') else: pheno =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno.set_index('IID', inplace=True, drop=True) if 'include_allPNP'in args:", "code that 
decides which individuals to remove on the fly import ForPaper.VertexCut as", "= [] if 'taxa' in kwargs: if kwargs['taxa'][0]=='*': kwargs['taxa']=[initial+kwargs['taxa'][1:] for initial in ('s_',", "Freq','Mayonnaise Including Light Freq', 'Thousand Island Dressing, Garlic Dressing Freq', 'Honey, Jam, fruit", "[c for c in pheno.columns if c[:2]=='g_' ] if 'f' in args: mb_columns", "'Pita', 'Pasta Bolognese', 'Chicken Meatballs', 'Burekas', 'Carrots', 'Tofu', 'Wholemeal Pita', 'Sunflower seeds', 'Coriander',", "args or ('PCs') in args: mb_columns += [c for c in pheno.columns if", "'Guacamole', 'Coleslaw', 'Americano', 'Pesek Zman snack', 'Green onions', 'Mushrooms', 'Lemon juice', 'Canned Tuna", "'Rugelach', 'Cake', 'Ravioli', 'Tomatoes', 'Wholemeal Light Bread', 'Marble Cake', 'Brown Rice', 'Cold cut',", "args=list(args)+['covars','blood','glucose','ffq','antropo'] mb_columns += ['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] if 'include_allPNP' not in args or ('PCs') in args:", "'Fish (not Tuna) Pickled, Dried, Smoked, Canned Freq'] if 'pastry' in args: mb_columns_extra", "such as Salami Freq', 'Pastrami or Smoked Turkey Breast Freq', 'Turkey Meatballs, Beef,", "and ('covars_noPCs' not in args): features_to_drop += ['IsGenotek'] if ('covars' not in args)", "'Shish Kebab in Pita Bread Freq', 'Falafel in Pita version 2 Freq','Processed Meat", "'Lettuce', 'Rice Noodles', 'Lentils', 'Mutton', 'Wholemeal Noodles', 'Green Tea', 'Schnitzel', 'Brown Sugar', 'Peanuts',", "Tomato Soup Freq', 'Red Pepper Freq', 'Green Pepper Freq', 'Cucumber Freq', 'Zucchini or", "'Steak', 'Light Bread', 'Wholemeal Crackers', 'Sugar Free Gum', 'Hamburger', 'Dark Beer', 'Cooked beets',", "%s'%(kwarg) if ('16s' in args): assert 'dic' not in args, '16s and dic", "kwargs['taxa'][1]=='_': kwargs['taxa']=[kwargs['taxa']] for taxa in kwargs['taxa']: taxadf=pheno.filter(regex=(taxa)) mb_columns += taxadf.columns.values.tolist() if 'all_bac' in", "pheno_nodic[[c for c in 
pheno_nodic.columns if c[:2]=='g_']] else: if 'include_allPNP' in args: pheno", "in args: mb_columns_extra += ['Stress','Sleep quality'] if 'smoking' in args: mb_columns_extra += ['Currently", "for c in pheno.columns if c[:2] in ['s_','g_','f_','o_','c_','p_']] other_columns = [c for c", "'Wine', 'Cookies', 'Challah', 'Spelled', 'Honey', 'Green beans', 'Milk', 'Peanut Butter', 'Cooked carrots', 'Lemon',", "args: mb_columns += [c for c in pheno.columns if c[:2]=='p_' ] if 'otu'", "spread', 'Kebab', 'Rice crackers', 'Wafers', 'Lettuce', 'Rice Noodles', 'Lentils', 'Mutton', 'Wholemeal Noodles', 'Green", "('s_', 'g_', 'f_', 'o_', 'c_', 'p_')]] for taxa_level in ['otu', 'species', 'genus', 'family',", "juice', 'Canned Tuna Fish', 'Vegetable Salad', 'Fried eggplant', 'Salmon', 'Cashew', 'Jewish donut', 'Rugelach',", "in pheno.columns if c[:2]=='s_' ] if 'g' in args: mb_columns += [c for", "Cakes or Cream Cakes Freq', 'Yeast Cakes and Cookies as Rogallach, Croissant or", "for val in mb_columns_extra] mb_columns+=mb_columns_extra if 'meals' in args: mealsColumns=[val.replace(' ','_') for val", "mb_columns_extra += ['Egg Recipes Freq', 'Egg, Hard Boiled or Soft Freq', 'Schnitzel Turkey", "'Schnitzel', 'Brown Sugar', 'Peanuts', 'Mayonnaise', 'Persimmon', 'Apple juice', 'Stuffed Peppers', 'Egg', 'Pear', 'Peas',", "cream', 'Cream Cake', 'Green cabbage', 'Olives', 'Balsamic vinegar', 'Peach', 'Light Yellow Cheese', 'Red", "individuals to remove on the fly import ForPaper.VertexCut as vc df_household = pandas.read_csv(os.path.join(cleanDataPath,", "not in args) and ('other' not in args): if 'calories' not in args:", "df_household = df_household[[c for c in df_household.columns if int(c) in pheno.index]] df_household =", "'Doritos', 'Israeli couscous', 'Pistachio', 'Date honey', 'Vinaigrette', 'Bamba', 'Dark Chocolate', 'Turkey Shawarma', 'Olive", "mb_columns += ['95P_Glucose','Glucose_Noise','Max_Glucose','Median_Glucose','WakeupGlucose', 
'MeanGlucoseResponse','MeanBreadResponse','MeanBreadButterResponse'] if 'ffq' in args: mb_columns += ['Alcoholic_Drinks_Freq','Cigarretes_per_day','Coffee_Freq','Start_smoking_age'] if 'antropo'", "header=None) df_fam = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim.fam'), delim_whitespace=True, index_col=0, header=None) df_related=df_fam[~df_fam.index.isin(df_fam_no_related.index)] pheno=pheno[(~pheno.index.isin(df_related.index))] if ('keep_sterile') not", "'IsGenotek and covars are mutually exclusive' if 'otu' in args: assert '16s' in", "c alpha_diversity_s = (pheno_s>pheno_s.min().min()).sum(axis=1) alpha_diversity_g = (pheno_g>pheno_g.min().min()).sum(axis=1) pheno.loc[pheno.Hips==-9, 'WHR'] = np.nan pheno.loc[pheno.Waist==-9, 'WHR']", "Cheese', 'Nectarine', 'Chicken legs', 'Nuts', 'Goat Cheese', 'Jam', 'Feta Cheese', 'Mandarin', 'Pesto', 'Sugar", "kwarg: %s'%(kwarg) if ('16s' in args): assert 'dic' not in args, '16s and", "Freq', 'Simple Cookies or Biscuits Freq', 'Ice Cream or Popsicle which contains Dairy", "cleanDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/cleanData/' rawDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/rawData' pheno_fn_bac =os.path.join(cleanDataPath,'noMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') #pheno_fn_bac =os.path.join(cleanDataPath,'allChipPhenotypes_nodfukimWith5PCair.phenotypes') pheno_fn_bacDic=os.path.join(cleanDataPath,'dicNoMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') pheno_fn_bacAllPNP=os.path.join(rawDataPath,'allPNPPhenotypes.phenotypes') iidsNoSharedEnv='/net/mraid08/export/genie/Microbiome/Analyses/PNPChip/cleanData/PNP_autosomal_clean2_nodfukim_NoCouples.txt' PNP_16S_DIR = '/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/16S' glycemicStatusPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/glycemic_status.csv'", "Tea Freq', 'Green Tea Freq', 'Regular Tea Freq', 'Beer Freq', 'Sweet Dry Wine,", "Breast Freq', 'Turkey 
Meatballs, Beef, Chicken Freq', 'Shish Kebab in Pita Bread Freq',", "Okra Cooked Freq', 'Cauliflower or Broccoli Freq','Sweet Potato Freq', 'Brussels Sprouts, Green or", "Freq', 'Honey, Jam, fruit syrup, Maple syrup Freq', 'White or Brown Sugar Freq',", "'Sweet Yogurt', 'Bread', 'Onion', 'Cream Cheese', 'Chicken soup', 'Wholemeal Roll', 'Canned corn', 'Salty", "'Green Tea', 'Schnitzel', 'Brown Sugar', 'Peanuts', 'Mayonnaise', 'Persimmon', 'Apple juice', 'Stuffed Peppers', 'Egg',", "Green or Red Cabbage Freq', 'Lettuce Freq','Carrots, Fresh or Cooked, Carrot Juice Freq',", "= df_household.index[vc.VertexCut().work(df_household.values, 0.5)] pheno=pheno[~pheno.index.isin(remove_inds)] if 'keep_missingCovars' not in args: #One participant 244624 has", "2 Freq','Processed Meat Products Freq','Beef, Veal, Lamb, Pork, Steak, Golash Freq', 'Mixed Meat", "# print pheno.loc[participant,['Calories_kcal','Carbs_g','Fat_g','Protain_g','Protain_g']] # print sum if np.isnan(pheno.loc[participant,'Age']) or np.isnan(pheno.loc[participant,'Gender']) : print(\"Participant %s,", "pheno[mb_columns] if 'threshold' not in kwargs: threshold = -4 else: threshold=kwargs['threshold'] if 'ratio'", "+= [c for c in pheno.columns if c[:2]=='f_' ] if 'o' in args:", "delim_whitespace=True, index_col=0, header=None) df_fam = pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim.fam'), delim_whitespace=True, index_col=0, header=None) df_related=df_fam[~df_fam.index.isin(df_fam_no_related.index)] pheno=pheno[(~pheno.index.isin(df_related.index))] if", "Salmon', 'Melon', 'Roll', 'Whipped cream', 'Coconut milk', 'Pretzels', 'Kohlrabi', 'Eggplant Salad', 'Cooked green", "args) and ('other' not in args): if 'calories' not in args: features_to_drop +=['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek']", "Loquat Freq', 'Grapes or Raisins Freq', 'Banana Freq', 'Melon Freq', 'Kiwi or Strawberries", "in kwargs: threshold = -4 else: threshold=kwargs['threshold'] if 
'ratio' in kwargs: ratio=kwargs['ratio'] mb_columns", "args: #One participant 244624 has no 'Calories_kcal','Carbs_g','Fat_g','Protain_g' #3 participant 86356,762339,805175 have no 'Age','Gender'", "alpha_diversity_g = (pheno_g>pheno_g.min().min()).sum(axis=1) pheno.loc[pheno.Hips==-9, 'WHR'] = np.nan pheno.loc[pheno.Waist==-9, 'WHR'] = np.nan pheno['LDLCholesterol'] =", "args: pheno = pheno.loc[pheno['IsGenotek']==0] mb_columns = [] if 'taxa' in kwargs: if kwargs['taxa'][0]=='*':", "df_glucose = pandas.read_csv(glycemicStatusPath).set_index('RegNum') pheno = df_glucose.merge(pheno, left_index=True, right_index=True,how='right') mb_columns +=['median_Without_BMI_ALT_Overall','WakeupGlucose','BMI','VegeterianScale'] pheno.loc[pheno['VegeterianScale']<0, 'VegeterianScale']=np.nan if", "pheno.index]] df_household = df_household[df_household.index.isin(pheno.index)] remove_inds = df_household.index[vc.VertexCut().work(df_household.values, 0.5)] pheno=pheno[~pheno.index.isin(remove_inds)] if 'keep_missingCovars' not in", "if np.isnan(pheno.loc[participant,'Age']) or np.isnan(pheno.loc[participant,'Gender']) : print(\"Participant %s, age %s, gender %s\" %(participant,pheno.loc[participant,'Age'],pheno.loc[participant,'Gender'])) #", "'Light Yellow Cheese', 'Red pepper', 'Bagel', 'Entrecote', 'Cottage cheese', 'Oil', 'Natural Yogurt', 'Walnuts',", "+= ['Is pregnant','Is breastfeeding','Is after birth', 'Taking contraceptives', 'Regular period', 'Irregular period', 'No", "'Cooked Vegetable Salads Freq', 'Pickled Vegetables Freq', 'Olives Freq'] if 'womenOnlyQuestions' in args:", "'meatProducts' in args: mb_columns_extra += ['Egg Recipes Freq', 'Egg, Hard Boiled or Soft", "kwargs: if kwargs['taxa'][0]=='*': kwargs['taxa']=[initial+kwargs['taxa'][1:] for initial in ('s_', 'g_', 'f_', 'o_', 'c_', 'p_')]", "right_index=True) pheno_s = pheno[[c for c in pheno.columns if c[:2]=='s_']] pheno_g = pheno[[c", "'IsGenotek' in args: assert 'covars' not in args, 'IsGenotek and covars 
are mutually", "cabbage', 'Olives', 'Balsamic vinegar', 'Peach', 'Light Yellow Cheese', 'Red pepper', 'Bagel', 'Entrecote', 'Cottage", "print(pheno.columns.values.tolist()) print(pheno.shape) sum=0 for participant in pheno[['Age','Gender']].index.values: # if np.isnan(pheno.loc[participant,'Calories_kcal']) or \\ #", "'D.GI','D.Thyroid', 'D.NSAID','D.Contraception'] meals=['Vodka or Arak', 'Avocado', 'Parsley', 'Coated peanuts', 'Sugar', 'Smoked Salmon', 'Melon',", "not in ['c_', 'g_', 'o_', 's_', 'k_', 'p_', 'f_']): print c alpha_diversity_s =", "initial in ('s_', 'g_', 'f_', 'o_', 'c_', 'p_')] elif kwargs['taxa'][1]=='_': kwargs['taxa']=[kwargs['taxa']] for taxa", "Freq', 'Coffee Freq', 'Herbal Tea Freq', 'Green Tea Freq', 'Regular Tea Freq', 'Beer", "val in mb_columns_extra] mb_columns+=mb_columns_extra if 'meals' in args: mealsColumns=[val.replace(' ','_') for val in", "if 's_stats_pheno' in args: s_stats=['BMI','Cholesterol,total','WakeupGlucose','Albumin','Creatinine','HbA1C%','Height','Hips','Waist','WHR','HDLCholesterol'] #, 'Triglycerides', 'LDLCholesterol'] mb_columns+=s_stats mb_columns=list(set(mb_columns)) if 'fid' in", "'Chocolate Mousse Cake', 'Sweet Roll', 'Danish', 'Coffee', 'Pasta Salad', 'Cuba', 'Chicken Liver', 'Sweet", "if 's' in args: mb_columns += [c for c in pheno.columns if c[:2]=='s_'", "'Juice Freq', 'Diet Soda Freq', 'Regular Sodas with Sugar Freq', 'Decaffeinated Coffee Freq',", "args=list(args)+['s','g','f','o','c','p'] if 's' in args: mb_columns += [c for c in pheno.columns if", "'Red Pepper Freq', 'Green Pepper Freq', 'Cucumber Freq', 'Zucchini or Eggplant Freq','Peas, Green", "arg in drug_args: if arg in args: mb_columns += [arg] mb_columns_extra=[val.replace(' ','_') for", "has no 'Calories_kcal','Carbs_g','Fat_g','Protain_g' #3 participant 86356,762339,805175 have no 'Age','Gender' # if set(['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'])<=set(pheno.columns.values): 
keep_inds=pheno.loc[:,['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek']].replace(-9,", "np.isnan(pheno.loc[participant,'Calories_kcal']) or \\ # np.isnan(pheno.loc[participant,'Carbs_g']) or \\ # np.isnan(pheno.loc[participant,'Fat_g']) or \\ # np.isnan(pheno.loc[participant,'Protain_g']):", "Freq', 'Nuts, almonds, pistachios Freq','Peanuts Freq'] if 'vegetables' in args: mb_columns_extra += ['Tomato", "Clementine Freq', 'Orange or Grapefruit Freq', 'Orange or Grapefruit Juice Freq', 'Apple Freq',", "or Red Cabbage Freq', 'Lettuce Freq','Carrots, Fresh or Cooked, Carrot Juice Freq', 'Corn", "mb_columns=list(set(mb_columns)) if 'fid' in args: mb_columns = ['FID']+mb_columns ########################FFQ START##################### if 'questionnaires' in", "'Chicken']#removed: u'Soda water',u'Water', u'Salt', known_args+= ffq_args known_args+= drug_args known_kwargs = ['ratio', 'threshold','taxa'] for", "c in pheno.columns if c[:2]=='g_']] ### for c in pheno: ### if (c[:2]", "'Mango', 'Lasagna', 'Popcorn', 'Hummus Salad', 'Tilapia', 'Pizza', 'Fried cauliflower', 'Roasted eggplant', 'Baguette', 'Lentil", "Light Freq', 'Thousand Island Dressing, Garlic Dressing Freq', 'Honey, Jam, fruit syrup, Maple", "'Halva', 'Yellow pepper', 'Mango', 'Lasagna', 'Popcorn', 'Hummus Salad', 'Tilapia', 'Pizza', 'Fried cauliflower', 'Roasted", "Hunger','Morning Hunger', 'Midday Hunger', 'Evening Hunger'] if 'legumes' in args: mb_columns_extra += ['Falafel", "Sugar Freq', 'Artificial Sweeteners Freq',] if 'drinks' in args: mb_columns_extra += ['Nectar, Cider", "and ('PCs' not in args): features_to_drop+=['PC1','PC2','PC3','PC4','PC5'] pheno=pheno.drop(features_to_drop,axis=1) if ('-9' not in args): pheno.replace(-9,", "if (c[:2] not in ['c_', 'g_', 'o_', 's_', 'k_', 'p_', 'f_']): print c", "'Almonds', 'Falafel', 'Noodles', 'Jachnun', 'Turkey', 'Sushi', 'Brazil nuts', 'Orange', 'Rice', 'Diet Fruit Drink',", "['T1Activity kind','Type 1 activity - freq','T2Activity 
kind', 'Type 2 activity - freq','T3Activity kind','Type", "if 'include_allPNP'in args: status, output = subprocess.getstatusoutput(\"cut -f 1 %s -d ' '", "in args: mb_columns_extra += ['Mandarin or Clementine Freq', 'Orange or Grapefruit Freq', 'Orange", "'Salty Snacks Freq', 'Cheese Cakes or Cream Cakes Freq', 'Yeast Cakes and Cookies", "'Alcoholic Drinks Freq'] if 'fruits' in args: mb_columns_extra += ['Mandarin or Clementine Freq',", "Freq', 'Cooked Legumes Freq', 'Processed Meat Free Products Freq'] if 'meatProducts' in args:", "Freq', 'Brussels Sprouts, Green or Red Cabbage Freq', 'Lettuce Freq','Carrots, Fresh or Cooked,", "Pita version 2 Freq','Processed Meat Products Freq','Beef, Veal, Lamb, Pork, Steak, Golash Freq',", "'Pear', 'Peas', 'Pecan', 'Cooked cauliflower', 'Cooked Sweet potato', 'Butter', 'Omelette', 'Coated Wafers', 'Boiled", "phenoAll = extract('s','include_allPNP') print(phenoAll.shape) print(phenoAll.columns) phenoChip = extract('keep_household','s','keep_related') print(phenoChip.shape) print(phenoChip.columns) # print \"Only", "'Dried dates', 'Carrot Cake', 'Doritos', 'Israeli couscous', 'Pistachio', 'Date honey', 'Vinaigrette', 'Bamba', 'Dark", "'Croissant', 'Biscuit', 'Potato chips', 'White Cheese', 'French fries', 'Wholemeal Bread', 'Tuna Salad', 'Chocolate", "Dark Chocolate Freq', 'Salty Snacks Freq', 'Cheese Cakes or Cream Cakes Freq', 'Yeast", "'g_', 'o_', 's_', 'k_', 'p_', 'f_']): print c alpha_diversity_s = (pheno_s>pheno_s.min().min()).sum(axis=1) alpha_diversity_g =", "is why we remove him\" features_to_drop=[] if ('IsGenotek' not in args) and ('covars'", "in args): features_to_drop+=['PC1','PC2','PC3','PC4','PC5'] pheno=pheno.drop(features_to_drop,axis=1) if ('-9' not in args): pheno.replace(-9, np.nan, inplace=True) if", "Burgul, Mamaliga, Groats Freq', 'Potatoes Boiled, Baked, Mashed, Potatoes Salad Freq', 'Fries Freq',", "'Butter', 'Omelette', 'Coated Wafers', 'Boiled corn', 'Chicken drumstick', 'Pita', 'Pasta Bolognese', 
'Chicken Meatballs',", "Turkey Breast Freq', 'Turkey Meatballs, Beef, Chicken Freq', 'Shish Kebab in Pita Bread", "fries', 'Wholemeal Bread', 'Tuna Salad', 'Chocolate spread', 'Kebab', 'Rice crackers', 'Wafers', 'Lettuce', 'Rice", "2 activity - freq','T3Activity kind','Type 3 activity - freq'] if 'bloodType' in args:", "Freq', 'Lettuce Freq','Carrots, Fresh or Cooked, Carrot Juice Freq', 'Corn Freq', 'Parsley, Celery,", "RH-'] if 'cereals' in args: mb_columns_extra += ['Cornflakes Freq','Granola or Bernflaks Freq','Cooked Cereal", "'Chinese Chicken Noodles', 'Shakshouka', 'Tahini', 'Chicken breast', 'Steak', 'Light Bread', 'Wholemeal Crackers', 'Sugar", "if c[:2]=='p_' ] if 'otu' in args: mb_columns += [c for c in", "for c in pheno.columns if c[:2]=='f_' ] if 'o' in args: mb_columns +=", "with tomato sauce', 'Chicken']#removed: u'Soda water',u'Water', u'Salt', known_args+= ffq_args known_args+= drug_args known_kwargs =", "Biscuits Freq', 'Simple Cookies or Biscuits Freq', 'Ice Cream or Popsicle which contains", "'p_')] elif kwargs['taxa'][1]=='_': kwargs['taxa']=[kwargs['taxa']] for taxa in kwargs['taxa']: taxadf=pheno.filter(regex=(taxa)) mb_columns += taxadf.columns.values.tolist() if", "'Watermelon Freq', 'Dried Fruits Freq', 'Fruit Salad Freq'] if 'hunger' in args: mb_columns_extra", "Cakes Freq', 'Yeast Cakes and Cookies as Rogallach, Croissant or Donut Freq', 'Cake,", "in args: mb_columns_extra += ['Cornflakes Freq','Granola or Bernflaks Freq','Cooked Cereal such as Oatmeal", "= pandas.read_csv(glycemicStatusPath).set_index('RegNum') pheno = df_glucose.merge(pheno, left_index=True, right_index=True,how='right') mb_columns +=['median_Without_BMI_ALT_Overall','WakeupGlucose','BMI','VegeterianScale'] pheno.loc[pheno['VegeterianScale']<0, 'VegeterianScale']=np.nan if 'drugs'", "if 'vegetables' in args: mb_columns_extra += ['Tomato Freq','Cooked Tomatoes, Tomato Sauce, Tomato Soup", "c[:4]=='OTU_' ] if 'no_log' in args: assert 'dic' not in args, 'dic and", "'dic' 
not in args, '16s and dic are mutually exclusive' if ('taxa' in", "'Chicken drumstick', 'Pita', 'Pasta Bolognese', 'Chicken Meatballs', 'Burekas', 'Carrots', 'Tofu', 'Wholemeal Pita', 'Sunflower", "import ForPaper.VertexCut as vc df_household = pandas.read_csv(os.path.join(cleanDataPath, 'EnvironmentBlock.txt'), delim_whitespace=True) df_household = df_household[[c for", "corn', 'Chicken drumstick', 'Pita', 'Pasta Bolognese', 'Chicken Meatballs', 'Burekas', 'Carrots', 'Tofu', 'Wholemeal Pita',", "%s'%(arg) for kwarg in list(kwargs.keys()): assert kwarg in known_kwargs, 'unkown kwarg: %s'%(kwarg) if", "Milk Yogurt', 'Orange juice', 'Potatoes', 'Halva', 'Yellow pepper', 'Mango', 'Lasagna', 'Popcorn', 'Hummus Salad',", "10**pheno[mb_columns] if 'all_non_bac' in args: args=list(args)+['covars','blood','glucose','ffq','antropo'] mb_columns += ['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] if 'include_allPNP' not in", "# np.isnan(pheno.loc[participant,'Fat_g']) or \\ # np.isnan(pheno.loc[participant,'Protain_g']): # sum+=1 # print participant # print", "'Peach', 'Light Yellow Cheese', 'Red pepper', 'Bagel', 'Entrecote', 'Cottage cheese', 'Oil', 'Natural Yogurt',", "if kwargs['taxa'][0]=='*': kwargs['taxa']=[initial+kwargs['taxa'][1:] for initial in ('s_', 'g_', 'f_', 'o_', 'c_', 'p_')] elif", "= ['dic', 'all_bac', 's', 'g','f','o','c','p','otu', 'all_non_bac', 'covars', 'blood', 'glucose', 'ffq', 'antropo', 's_stats_pheno', 'fid',", "exclusive with all_bac,s,g,f,o,c,p,otu' if 'include_allPNP' in args: assert 'dic' not in args, 'include_allPNP", "or Bernflaks Freq','Cooked Cereal such as Oatmeal Porridge Freq', 'Rice Freq','Couscous, Burgul, Mamaliga,", "'D.NSAID','D.Contraception'] meals=['Vodka or Arak', 'Avocado', 'Parsley', 'Coated peanuts', 'Sugar', 'Smoked Salmon', 'Melon', 'Roll',", "known_args+= drug_args known_kwargs = ['ratio', 'threshold','taxa'] for arg in args: assert arg in", "exclusive' if 'otu' in args: assert '16s' in args if 
'dic' in args:", "Freq', 'Saltine Crackers or Matzah Freq', 'Wholemeal Crackers Freq', 'Small Burekas Freq', 'Jachnun,", "= subprocess.getstatusoutput(\"cut -f 1 %s -d ' ' | cut -f 1 -d", "u'Salt', known_args+= ffq_args known_args+= drug_args known_kwargs = ['ratio', 'threshold','taxa'] for arg in args:", "'Walnuts', 'Edamame', 'Majadra', 'Oatmeal', 'Soy sauce', 'Strawberry', 'Pastrami', 'Lemonade', 'Pasta with tomato sauce',", "=pheno[~pheno.index.isin([int(dafook) for dafook in output.split('\\n')])] if ('16s' in args): pheno = pheno[[c for", "print c alpha_diversity_s = (pheno_s>pheno_s.min().min()).sum(axis=1) alpha_diversity_g = (pheno_g>pheno_g.min().min()).sum(axis=1) pheno.loc[pheno.Hips==-9, 'WHR'] = np.nan pheno.loc[pheno.Waist==-9,", "all_bac,s,g,f,o,c,p,otu' if 'include_allPNP' in args: assert 'dic' not in args, 'include_allPNP does not", "in args: args=list(args)+['covars','blood','glucose','ffq','antropo'] mb_columns += ['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] if 'include_allPNP' not in args or ('PCs')", "in pheno.columns if c[:2]=='f_' ] if 'o' in args: mb_columns += [c for", "'Green Pepper Freq', 'Cucumber Freq', 'Zucchini or Eggplant Freq','Peas, Green Beans or Okra", "Cheese', 'Tea', 'Beer', 'Mozzarella Cheese', 'Fried onions', 'Ice cream', 'Cream Cake', 'Green cabbage',", "covars!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\" #print beforeNumParticpants-afterNumParticpants #print \"805175 has no FFQ!!!!! 
that is why we remove", "mb_columns +=['median_Without_BMI_ALT_Overall','WakeupGlucose','BMI','VegeterianScale'] pheno.loc[pheno['VegeterianScale']<0, 'VegeterianScale']=np.nan if 'drugs' in args: mb_columns+=drug_args else: for arg in", "if ('IsGenotek' not in args) and ('covars' not in args) and ('covars_noPCs' not", "pheno.loc[participant,['Calories_kcal','Carbs_g','Fat_g','Protain_g','Protain_g']] # print sum if np.isnan(pheno.loc[participant,'Age']) or np.isnan(pheno.loc[participant,'Gender']) : print(\"Participant %s, age %s,", "c[:2]=='g_']] ### for c in pheno: ### if (c[:2] not in ['c_', 'g_',", "in ['s_','g_','f_','o_','c_','p_']] other_columns = [c for c in pheno.columns if c[:2] not in", "for c in pheno.columns if c[:2]=='g_']] ### for c in pheno: ### if", "'phylum']: df_taxa = pandas.read_csv(os.path.join(PNP_16S_DIR, taxa_level+'.txt'), sep='\\t', index_col=0) df_taxa[df_taxa<1e-3] = 1e-4 df_taxa = np.log10(df_taxa)", "#3 participant 86356,762339,805175 have no 'Age','Gender' # if set(['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'])<=set(pheno.columns.values): keep_inds=pheno.loc[:,['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek']].replace(-9, np.nan).dropna().index.values beforeNumParticpants=pheno.shape[0] pheno=pheno.loc[keep_inds]", "Bread Freq', 'Cooked Legumes Freq', 'Processed Meat Free Products Freq'] if 'meatProducts' in", "Salad', 'Chocolate spread', 'Kebab', 'Rice crackers', 'Wafers', 'Lettuce', 'Rice Noodles', 'Lentils', 'Mutton', 'Wholemeal", "mb_columns_extra += ['Nectar, Cider Freq', 'Diet Juice Freq', 'Juice Freq', 'Diet Soda Freq',", "if 'f' in args: mb_columns += [c for c in pheno.columns if c[:2]=='f_'", "'RDW','Sodium','TSH','WBC','AlkalinePhosphatase','GGT','LDH','Iron','LDLCholesterol','Magnesium', # 'Triglycerides','TotalProtein','TotalBilirubin','Urea'] mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', 
'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', 'RDW','Sodium','TSH','WBC','LDLCholesterol'] if 'glucose' in args:", "Chocolate', 'Turkey Shawarma', 'Olive oil', #u'Parmesan\\xc2\\xa0cheese', 'Guacamole', 'Coleslaw', 'Americano', 'Pesek Zman snack', 'Green", "[c for c in pheno.columns if c[:2]=='s_' ] if 'g' in args: mb_columns", "fly import ForPaper.VertexCut as vc df_household = pandas.read_csv(os.path.join(cleanDataPath, 'EnvironmentBlock.txt'), delim_whitespace=True) df_household = df_household[[c", "### for c in pheno: ### if (c[:2] not in ['c_', 'g_', 'o_',", "pepper', 'Mango', 'Lasagna', 'Popcorn', 'Hummus Salad', 'Tilapia', 'Pizza', 'Fried cauliflower', 'Roasted eggplant', 'Baguette',", "in args: mb_columns = ['FID']+mb_columns ########################FFQ START##################### if 'questionnaires' in args: args=list(args)+ffq_args mb_columns_extra=[]", "in args: mb_columns_extra += ['Milk or Dark Chocolate Freq', 'Salty Snacks Freq', 'Cheese", "Butter', 'Cooked carrots', 'Lemon', 'Salty Cookies', 'Beef', 'Meatballs', 'Hamburger sandwich', 'Chicken thighs', 'Granola',", "'Pizza Freq'] if 'qualityOfLiving' in args: mb_columns_extra += ['Stress','Sleep quality'] if 'smoking' in", "missing covars!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\" #print beforeNumParticpants-afterNumParticpants #print \"805175 has no FFQ!!!!! 
that is why we", "or Smoked Turkey Breast Freq', 'Turkey Meatballs, Beef, Chicken Freq', 'Shish Kebab in", "if c[:2]=='s_']] pheno_g = pheno[[c for c in pheno.columns if c[:2]=='g_']] ### for", "'Rice Noodles', 'Lentils', 'Mutton', 'Wholemeal Noodles', 'Green Tea', 'Schnitzel', 'Brown Sugar', 'Peanuts', 'Mayonnaise',", "for c in pheno.columns if c[:2]=='o_' ] if 'c' in args: mb_columns +=", "Without Dairy Freq', 'Black or White Grains, Watermelon Seeds Freq', 'Nuts, almonds, pistachios", "########################FFQ START##################### if 'questionnaires' in args: args=list(args)+ffq_args mb_columns_extra=[] if 'activity' in args: mb_columns_extra", "Biscuits Freq', 'Ice Cream or Popsicle which contains Dairy Freq', 'Popsicle Without Dairy", "'Fresh Vegetable Salad Without Dressing or Oil Freq', 'Fresh Vegetable Salad With Dressing", "right_index=True,how='right') mb_columns +=['median_Without_BMI_ALT_Overall','WakeupGlucose','BMI','VegeterianScale'] pheno.loc[pheno['VegeterianScale']<0, 'VegeterianScale']=np.nan if 'drugs' in args: mb_columns+=drug_args else: for arg", "' | cut -f 1 -d '_'\"%os.path.join(rawDataPath,'tmp','dfukim.txt')) pheno =pheno[~pheno.index.isin([int(dafook) for dafook in output.split('\\n')])]", "'Pear Fresh, Cooked or Canned Freq','Persimmon Freq', 'Watermelon Freq', 'Dried Fruits Freq', 'Fruit", "args): features_to_drop+=['PC1','PC2','PC3','PC4','PC5'] pheno=pheno.drop(features_to_drop,axis=1) if ('-9' not in args): pheno.replace(-9, np.nan, inplace=True) if 'permute'", "or Canned Freq','Persimmon Freq', 'Watermelon Freq', 'Dried Fruits Freq', 'Fruit Salad Freq'] if", "+= [c for c in pheno.columns if c[:2]=='p_' ] if 'otu' in args:", "not in ('s_', 'g_', 'f_', 'o_', 'c_', 'p_')]] for taxa_level in ['otu', 'species',", "'Raisins', 'Chocolate', 'Quinoa', 'Cooked broccoli', 'Beef Cholent', 'Cracker', 'Chocolate Cookies', 'White beans', 'Cooked", "and covars are mutually exclusive' if 'otu' in args: assert '16s' in args", "onions', 
'Mushrooms', 'Lemon juice', 'Canned Tuna Fish', 'Vegetable Salad', 'Fried eggplant', 'Salmon', 'Cashew',", "args: mb_columns += ['95P_Glucose','Glucose_Noise','Max_Glucose','Median_Glucose','WakeupGlucose', 'MeanGlucoseResponse','MeanBreadResponse','MeanBreadButterResponse'] if 'ffq' in args: mb_columns += ['Alcoholic_Drinks_Freq','Cigarretes_per_day','Coffee_Freq','Start_smoking_age'] if", "Freq', 'Grapes or Raisins Freq', 'Banana Freq', 'Melon Freq', 'Kiwi or Strawberries Freq',", "water',u'Water', u'Salt', known_args+= ffq_args known_args+= drug_args known_kwargs = ['ratio', 'threshold','taxa'] for arg in", "in args: #One participant 244624 has no 'Calories_kcal','Carbs_g','Fat_g','Protain_g' #3 participant 86356,762339,805175 have no", "in df_household.columns if int(c) in pheno.index]] df_household = df_household[df_household.index.isin(pheno.index)] remove_inds = df_household.index[vc.VertexCut().work(df_household.values, 0.5)]", "'keep_household', 'no_log', 'keep_related', 'keep_sterile', '16s', '-9', 'covars_noPCs', 'PCs', 'lactose','include_allPNP','IsGenotek','permute','meals','other','drugs', 'calories','bloodType','questionnaires','keep_missingCovars','activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions',", "or Cake Freq', 'Coated or Stuffed Cookies, Waffles or Biscuits Freq', 'Simple Cookies", "Freq'] if 'qualityOfLiving' in args: mb_columns_extra += ['Stress','Sleep quality'] if 'smoking' in args:", "pheno =pheno[~pheno.index.isin([int(dafook) for dafook in output.split('\\n')])] if ('16s' in args): pheno = pheno[[c", "Hard Boiled or Soft Freq', 'Schnitzel Turkey or Chicken Freq', 'Chicken or Turkey", "pheno.loc[pheno['VegeterianScale']<0, 'VegeterianScale']=np.nan if 'drugs' in args: mb_columns+=drug_args else: for arg in drug_args: if", "'g_', 'f_', 'o_', 'c_', 'p_')] elif kwargs['taxa'][1]=='_': 
kwargs['taxa']=[kwargs['taxa']] for taxa in kwargs['taxa']: taxadf=pheno.filter(regex=(taxa))", "which contains Dairy Freq', 'Popsicle Without Dairy Freq', 'Black or White Grains, Watermelon", "'Wafers', 'Lettuce', 'Rice Noodles', 'Lentils', 'Mutton', 'Wholemeal Noodles', 'Green Tea', 'Schnitzel', 'Brown Sugar',", "('keep_sterile') not in args: if '16s' in args: sterile_individuals = alpha_diversity_g[alpha_diversity_g < 4].index", "Freq', 'Pastrami or Smoked Turkey Breast Freq', 'Turkey Meatballs, Beef, Chicken Freq', 'Shish", "Fish', 'Vegetable Salad', 'Fried eggplant', 'Salmon', 'Cashew', 'Jewish donut', 'Rugelach', 'Cake', 'Ravioli', 'Tomatoes',", "'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', # 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', # 'RDW','Sodium','TSH','WBC','AlkalinePhosphatase','GGT','LDH','Iron','LDLCholesterol','Magnesium', # 'Triglycerides','TotalProtein','TotalBilirubin','Urea'] mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', 'RDW','Sodium','TSH','WBC','LDLCholesterol']", "args: pheno=pandas.DataFrame(pheno.values[np.random.permutation(pheno.shape[0])],index=pheno.index,columns=pheno.columns) return pheno if __name__==\"__main__\": # pheno=extract('dic','covars','keep_household',\"pastry\",ratio=0.2)#'all_bac' phenoAll = extract('s','include_allPNP','covars')#'include_allPNP','keep_household','ffq','keep_related')#'include_allPNP', print(phenoAll.shape) print(phenoAll.columns)", "'keep_sterile', '16s', '-9', 'covars_noPCs', 'PCs', 'lactose','include_allPNP','IsGenotek','permute','meals','other','drugs', 
'calories','bloodType','questionnaires','keep_missingCovars','activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions', 'genotek_only', 'swab_only'] ffq_args", "import Bed import subprocess cleanDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/cleanData/' rawDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/rawData' pheno_fn_bac =os.path.join(cleanDataPath,'noMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') #pheno_fn_bac =os.path.join(cleanDataPath,'allChipPhenotypes_nodfukimWith5PCair.phenotypes') pheno_fn_bacDic=os.path.join(cleanDataPath,'dicNoMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') pheno_fn_bacAllPNP=os.path.join(rawDataPath,'allPNPPhenotypes.phenotypes') iidsNoSharedEnv='/net/mraid08/export/genie/Microbiome/Analyses/PNPChip/cleanData/PNP_autosomal_clean2_nodfukim_NoCouples.txt'", "Cookies or Biscuits Freq', 'Ice Cream or Popsicle which contains Dairy Freq', 'Popsicle", "'Avocado Freq','Lemon Freq', 'Onion Freq', 'Garlic Freq', 'Vegetable Soup Freq', 'Hummus Salad Freq',", "- freq','T3Activity kind','Type 3 activity - freq'] if 'bloodType' in args: mb_columns_extra +=", "participant 244624 has no 'Calories_kcal','Carbs_g','Fat_g','Protain_g' #3 participant 86356,762339,805175 have no 'Age','Gender' # if", "sum=0 for participant in pheno[['Age','Gender']].index.values: # if np.isnan(pheno.loc[participant,'Calories_kcal']) or \\ # np.isnan(pheno.loc[participant,'Carbs_g']) or", "args: mb_columns_extra += ['Work activity','Physical activity - mins','Physical activity - freq'] if 'activityTypesFreq'", "'Fish Cooked, Baked or Grilled Freq', 'Fried Fish Freq', 'Canned Tuna or Tuna", "mb_columns += [c for c in pheno.columns if c[:2]=='c_' ] if 'p' in", "Products Freq'] if 'meatProducts' in args: mb_columns_extra += ['Egg Recipes Freq', 'Egg, Hard", "Freq', 'Cauliflower 
or Broccoli Freq','Sweet Potato Freq', 'Brussels Sprouts, Green or Red Cabbage", "has no FFQ!!!!! that is why we remove him\" features_to_drop=[] if ('IsGenotek' not", "left_index=True, right_index=True) pheno_s = pheno[[c for c in pheno.columns if c[:2]=='s_']] pheno_g =", "'Cream Cheese', 'Chicken soup', 'Wholemeal Roll', 'Canned corn', 'Salty Cheese', 'Melawach', 'White cake',", "in args) and ('covars' not in args) and ('covars_noPCs' not in args): features_to_drop", "Cheese', 'Jam', 'Feta Cheese', 'Mandarin', 'Pesto', 'Sugar substitute', 'Cheesecake', 'Raisins', 'Chocolate', 'Quinoa', 'Cooked", "Turkey With Skin Freq', 'Chicken or Turkey Without Skin Freq', 'Sausages Freq', 'Sausages", "not in args: #noSharedEnvIID=pandas.read_csv(iidsNoSharedEnv,usecols=[0],header=None,sep='\\t') #pheno=pheno[pheno.index.isin(noSharedEnvIID[0].astype(int).values)] #new code that decides which individuals to remove", "c[:2]=='f_' ] if 'o' in args: mb_columns += [c for c in pheno.columns", "Chocolate Freq', 'Salty Snacks Freq', 'Cheese Cakes or Cream Cakes Freq', 'Yeast Cakes", "0.5)] pheno=pheno[~pheno.index.isin(remove_inds)] if 'keep_missingCovars' not in args: #One participant 244624 has no 'Calories_kcal','Carbs_g','Fat_g','Protain_g'", "features_to_drop +=['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] else: features_to_drop +=['Age','Gender'] if ('include_allPNP' not in args) and ('PCs' not", "'include_allPNP' in args: pheno =pandas.read_csv(pheno_fn_bacAllPNP,sep='\\t') else: pheno =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno.set_index('IID', inplace=True, drop=True) if 'include_allPNP'in", "c[:2]=='s_' ] if 'g' in args: mb_columns += [c for c in pheno.columns", "subprocess cleanDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/cleanData/' rawDataPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/rawData' pheno_fn_bac =os.path.join(cleanDataPath,'noMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') #pheno_fn_bac 
=os.path.join(cleanDataPath,'allChipPhenotypes_nodfukimWith5PCair.phenotypes') pheno_fn_bacDic=os.path.join(cleanDataPath,'dicNoMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') pheno_fn_bacAllPNP=os.path.join(rawDataPath,'allPNPPhenotypes.phenotypes') iidsNoSharedEnv='/net/mraid08/export/genie/Microbiome/Analyses/PNPChip/cleanData/PNP_autosomal_clean2_nodfukim_NoCouples.txt' PNP_16S_DIR = '/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/16S'", "-4 else: threshold=kwargs['threshold'] if 'ratio' in kwargs: ratio=kwargs['ratio'] mb_columns = [c for c", "assert '16s' in args if 'dic' in args: pheno =pandas.read_csv(pheno_fn_bacDic,sep='\\t') pheno.set_index('IID', inplace=True, drop=True)", "args): assert 'dic' not in args, '16s and dic are mutually exclusive' if", "'Tea', 'Beer', 'Mozzarella Cheese', 'Fried onions', 'Ice cream', 'Cream Cake', 'Green cabbage', 'Olives',", "c[:2]=='p_' ] if 'otu' in args: mb_columns += [c for c in pheno.columns", "'Tzfatit Cheese', 'Nectarine', 'Chicken legs', 'Nuts', 'Goat Cheese', 'Jam', 'Feta Cheese', 'Mandarin', 'Pesto',", "Freq', 'Fish (not Tuna) Pickled, Dried, Smoked, Canned Freq'] if 'pastry' in args:", "args: mb_columns_extra += ['Falafel in Pita Bread Freq', 'Cooked Legumes Freq', 'Processed Meat", "+= [c for c in pheno.columns if c[:2]=='PC'] if 'lactose' in args: mb_columns", "'Chicken Liver', 'Sweet Challah', 'Minced meat', 'Chocolate cake', 'Diet Coke', 'Dried dates', 'Carrot", "'Noodles', 'Jachnun', 'Turkey', 'Sushi', 'Brazil nuts', 'Orange', 'Rice', 'Diet Fruit Drink', 'Corn schnitzel',", "Freq', 'Popsicle Without Dairy Freq', 'Black or White Grains, Watermelon Seeds Freq', 'Nuts,", "exclusive' pheno[mb_columns] = 10**pheno[mb_columns] if 'all_non_bac' in args: args=list(args)+['covars','blood','glucose','ffq','antropo'] mb_columns += ['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] if", "[c for c in pheno.columns if c[:2] not in ['s_','g_','f_','o_','c_','p_']] if 'dic' in", 
"drop=True) if 'include_allPNP'in args: status, output = subprocess.getstatusoutput(\"cut -f 1 %s -d '", "Noodles', 'Shakshouka', 'Tahini', 'Chicken breast', 'Steak', 'Light Bread', 'Wholemeal Crackers', 'Sugar Free Gum',", "'Parsley, Celery, Fennel, Dill, Cilantro, Green Onion Freq', 'Fresh Vegetable Salad Without Dressing", "if c[:2]=='o_' ] if 'c' in args: mb_columns += [c for c in", "in args): if 'calories' not in args: features_to_drop +=['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'] else: features_to_drop +=['Age','Gender'] if", "as vc df_household = pandas.read_csv(os.path.join(cleanDataPath, 'EnvironmentBlock.txt'), delim_whitespace=True) df_household = df_household[[c for c in", "activity - freq','T2Activity kind', 'Type 2 activity - freq','T3Activity kind','Type 3 activity -", "snack', 'Green onions', 'Mushrooms', 'Lemon juice', 'Canned Tuna Fish', 'Vegetable Salad', 'Fried eggplant',", "Oil Freq', 'Avocado Freq','Lemon Freq', 'Onion Freq', 'Garlic Freq', 'Vegetable Soup Freq', 'Hummus", "'Tomatoes', 'Wholemeal Light Bread', 'Marble Cake', 'Brown Rice', 'Cold cut', 'Gilthead Bream', 'Garlic',", "'Apple juice', 'Stuffed Peppers', 'Egg', 'Pear', 'Peas', 'Pecan', 'Cooked cauliflower', 'Cooked Sweet potato',", "in args: if '16s' in args: sterile_individuals = alpha_diversity_g[alpha_diversity_g < 4].index else: sterile_individuals", "'Carrots', 'Tofu', 'Wholemeal Pita', 'Sunflower seeds', 'Coriander', 'Ciabatta', 'Tomato sauce', 'Heavy cream', 'Banana',", "Hunger', 'Midday Hunger', 'Evening Hunger'] if 'legumes' in args: mb_columns_extra += ['Falafel in", "meals] #Correct by total calories pheno.loc[:,mealsColumns]=pheno[mealsColumns][pheno[mealsColumns]!=-9].div(pheno['Calories_kcal_Total'].values,axis=0) pheno.replace(np.nan, 0,inplace=True) mb_columns += mealsColumns ########################FFQ END#####################", "Freq', 'Cooked Vegetable Salads Freq', 'Pickled Vegetables Freq', 'Olives Freq'] if 'womenOnlyQuestions' 
in", "args: mb_columns_extra += ['Stress','Sleep quality'] if 'smoking' in args: mb_columns_extra += ['Currently smokes','Ever", "mealsColumns ########################FFQ END##################### #for c in pheno: print c mb_columns=list(set(mb_columns)) pheno= pheno[mb_columns] if", "'Minced meat', 'Chocolate cake', 'Diet Coke', 'Dried dates', 'Carrot Cake', 'Doritos', 'Israeli couscous',", "extract(*args,**kwargs): known_args = ['dic', 'all_bac', 's', 'g','f','o','c','p','otu', 'all_non_bac', 'covars', 'blood', 'glucose', 'ffq', 'antropo',", "in ['s_','g_','f_','o_','c_','p_']] if 'dic' in args: presence=((pheno[mb_columns]>threshold +1e-5)&(pheno[mb_columns]!=0)).astype(int).sum() else: presence=(pheno[mb_columns]>threshold +1e-5).astype(int).sum() presence=presence[presence >", "in pheno.columns if c[:2]=='PC'] if 'lactose' in args: mb_columns += ['lactose'] if 'blood'", "for c in pheno_nodic.columns if c[:2]=='g_']] else: if 'include_allPNP' in args: pheno =pandas.read_csv(pheno_fn_bacAllPNP,sep='\\t')", "'antropo' in args: mb_columns += ['BMI','BPdia','BPsys','HeartRate','Height','Hips','WHR','Waist'] if 's_stats_pheno' in args: s_stats=['BMI','Cholesterol,total','WakeupGlucose','Albumin','Creatinine','HbA1C%','Height','Hips','Waist','WHR','HDLCholesterol'] #, 'Triglycerides',", "vegetables', 'Plum', 'Goat Milk Yogurt', 'Orange juice', 'Potatoes', 'Halva', 'Yellow pepper', 'Mango', 'Lasagna',", "mb_columns+=s_stats mb_columns=list(set(mb_columns)) if 'fid' in args: mb_columns = ['FID']+mb_columns ########################FFQ START##################### if 'questionnaires'", "'fruits' in args: mb_columns_extra += ['Mandarin or Clementine Freq', 'Orange or Grapefruit Freq',", "as Moussaka, Hamin, Cuba Freq', 'Mixed Chicken or Turkey Dishes Freq', 'Beef or", "Seeds Freq', 'Nuts, almonds, pistachios Freq','Peanuts Freq'] if 'vegetables' in args: mb_columns_extra +=", "'RDW','Sodium','TSH','WBC','LDLCholesterol'] if 'glucose' in args: mb_columns += 
['95P_Glucose','Glucose_Noise','Max_Glucose','Median_Glucose','WakeupGlucose', 'MeanGlucoseResponse','MeanBreadResponse','MeanBreadButterResponse'] if 'ffq' in args:", "'Cucumber', 'Mung Bean', 'Ketchup', 'Sweet Yogurt', 'Bread', 'Onion', 'Cream Cheese', 'Chicken soup', 'Wholemeal", "- freq'] if 'activityTypesFreq' in args: mb_columns_extra += ['T1Activity kind','Type 1 activity -", "in args: assert arg in known_args, 'unkown arg: %s'%(arg) for kwarg in list(kwargs.keys()):", "'Popcorn', 'Hummus Salad', 'Tilapia', 'Pizza', 'Fried cauliflower', 'Roasted eggplant', 'Baguette', 'Lentil Soup', 'Tzfatit", "pheno_nodic.set_index('IID', inplace=True, drop=True) pheno_s = pheno_nodic[[c for c in pheno_nodic.columns if c[:2]=='s_']] pheno_g", "Freq', 'Shish Kebab in Pita Bread Freq', 'Falafel in Pita version 2 Freq','Processed", "pheno=pheno[other_columns+presence] if ('keep_related' not in args): #bed = Bed(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated'), count_A1=True)#.read() df_fam_no_related =", "'Baguette', 'Lentil Soup', 'Tzfatit Cheese', 'Nectarine', 'Chicken legs', 'Nuts', 'Goat Cheese', 'Jam', 'Feta", "mb_columns_extra += ['Milk or Dark Chocolate Freq', 'Salty Snacks Freq', 'Cheese Cakes or", "Salad', 'Chocolate Mousse Cake', 'Sweet Roll', 'Danish', 'Coffee', 'Pasta Salad', 'Cuba', 'Chicken Liver',", "= extract('s','include_allPNP') print(phenoAll.shape) print(phenoAll.columns) phenoChip = extract('keep_household','s','keep_related') print(phenoChip.shape) print(phenoChip.columns) # print \"Only in", "juice', 'Stuffed Peppers', 'Egg', 'Pear', 'Peas', 'Pecan', 'Cooked cauliflower', 'Cooked Sweet potato', 'Butter',", "threshold=kwargs['threshold'] if 'ratio' in kwargs: ratio=kwargs['ratio'] mb_columns = [c for c in pheno.columns", "'Taking contraceptives', 'Regular period', 'Irregular period', 'No period','Hormonal replacment', 'Past breastfeeding'] if 'other'", "'Black or White Grains, Watermelon Seeds Freq', 'Nuts, almonds, pistachios 
Freq','Peanuts Freq'] if", "pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim.fam'), delim_whitespace=True, index_col=0, header=None) df_related=df_fam[~df_fam.index.isin(df_fam_no_related.index)] pheno=pheno[(~pheno.index.isin(df_related.index))] if ('keep_sterile') not in args: if", "'p_')]] for taxa_level in ['otu', 'species', 'genus', 'family', 'order', 'class', 'phylum']: df_taxa =", "Without Dressing or Oil Freq', 'Fresh Vegetable Salad With Dressing or Oil Freq',", "Groats Freq', 'Potatoes Boiled, Baked, Mashed, Potatoes Salad Freq', 'Fries Freq', 'Pasta or", "'Artificial Sweeteners Freq',] if 'drinks' in args: mb_columns_extra += ['Nectar, Cider Freq', 'Diet", "args: # mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', # 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', # 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', # 'RDW','Sodium','TSH','WBC','AlkalinePhosphatase','GGT','LDH','Iron','LDLCholesterol','Magnesium', # 'Triglycerides','TotalProtein','TotalBilirubin','Urea']", "chip:\" # print set(phenoChip.index.values)-set(phenoAll.index.values) # print len(set(phenoChip.index)-set(phenoAll.index)) print(pheno.columns.values.tolist()) print(pheno.shape) sum=0 for participant in", "'lactose' in args: mb_columns += ['lactose'] if 'blood' in args: # mb_columns +=", "'Herbal Tea Freq', 'Green Tea Freq', 'Regular Tea Freq', 'Beer Freq', 'Sweet Dry", "'Mustard', 'Coke', 'Vegetable Soup', 'Sausages', 'Pancake', 'Pasta', 'Sauteed vegetables', 'Plum', 'Goat Milk Yogurt',", "'D.Psychiatric', 'D.pain', 'D.CVD', 'D.GI','D.Thyroid', 'D.NSAID','D.Contraception'] meals=['Vodka or Arak', 'Avocado', 'Parsley', 'Coated peanuts', 'Sugar',", "['BMI','BPdia','BPsys','HeartRate','Height','Hips','WHR','Waist'] if 's_stats_pheno' in args: 
s_stats=['BMI','Cholesterol,total','WakeupGlucose','Albumin','Creatinine','HbA1C%','Height','Hips','Waist','WHR','HDLCholesterol'] #, 'Triglycerides', 'LDLCholesterol'] mb_columns+=s_stats mb_columns=list(set(mb_columns)) if 'fid'", "'No period','Hormonal replacment', 'Past breastfeeding'] if 'other' in args: #AddingIrisGlucose df_glucose = pandas.read_csv(glycemicStatusPath).set_index('RegNum')", "FFQ!!!!! that is why we remove him\" features_to_drop=[] if ('IsGenotek' not in args)", "Onion Freq', 'Fresh Vegetable Salad Without Dressing or Oil Freq', 'Fresh Vegetable Salad", "Challah Freq', 'Light Bread Freq', 'Wholemeal or Rye Bread Freq', 'Baguette Freq', 'Roll", "=os.path.join(cleanDataPath,'noMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') #pheno_fn_bac =os.path.join(cleanDataPath,'allChipPhenotypes_nodfukimWith5PCair.phenotypes') pheno_fn_bacDic=os.path.join(cleanDataPath,'dicNoMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') pheno_fn_bacAllPNP=os.path.join(rawDataPath,'allPNPPhenotypes.phenotypes') iidsNoSharedEnv='/net/mraid08/export/genie/Microbiome/Analyses/PNPChip/cleanData/PNP_autosomal_clean2_nodfukim_NoCouples.txt' PNP_16S_DIR = '/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/16S' glycemicStatusPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/glycemic_status.csv' def extract(*args,**kwargs): known_args", "'Jachnun, Mlawah, Kubana, Cigars Freq', 'Pizza Freq'] if 'qualityOfLiving' in args: mb_columns_extra +=", "if 'drugs' in args: mb_columns+=drug_args else: for arg in drug_args: if arg in", "'Soy sauce', 'Strawberry', 'Pastrami', 'Lemonade', 'Pasta with tomato sauce', 'Chicken']#removed: u'Soda water',u'Water', u'Salt',", "or Donut Freq', 'Cake, Torte Cakes, Chocolate Cake Freq', 'Fruit Pie or Cake", "'Brown Rice', 'Cold cut', 'Gilthead Bream', 'Garlic', 'Grapes', 'Chocolate Chip Cookies', 'Cucumber', 'Mung", "assert arg in known_args, 'unkown arg: %s'%(arg) for kwarg in list(kwargs.keys()): assert kwarg", 
"features_to_drop += ['IsGenotek'] if ('covars' not in args) and ('covars_noPCs' not in args)", "np.nan pheno['LDLCholesterol'] = pheno['Cholesterol,total'] - pheno['HDLCholesterol'] - 2*pheno['Triglycerides'] if 'genotek_only' in args: pheno", "tomato sauce', 'Chicken']#removed: u'Soda water',u'Water', u'Salt', known_args+= ffq_args known_args+= drug_args known_kwargs = ['ratio',", "Nectarine, Plum Freq', 'Pear Fresh, Cooked or Canned Freq','Persimmon Freq', 'Watermelon Freq', 'Dried", "pandas.read_csv(glycemicStatusPath).set_index('RegNum') pheno = df_glucose.merge(pheno, left_index=True, right_index=True,how='right') mb_columns +=['median_Without_BMI_ALT_Overall','WakeupGlucose','BMI','VegeterianScale'] pheno.loc[pheno['VegeterianScale']<0, 'VegeterianScale']=np.nan if 'drugs' in", "'Onion', 'Cream Cheese', 'Chicken soup', 'Wholemeal Roll', 'Canned corn', 'Salty Cheese', 'Melawach', 'White", "'Cookies', 'Challah', 'Spelled', 'Honey', 'Green beans', 'Milk', 'Peanut Butter', 'Cooked carrots', 'Lemon', 'Salty", "'g_', 'f_', 'o_', 'c_', 'p_')]] for taxa_level in ['otu', 'species', 'genus', 'family', 'order',", "args: pheno =pandas.read_csv(pheno_fn_bacDic,sep='\\t') pheno.set_index('IID', inplace=True, drop=True) pheno_nodic =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno_nodic.set_index('IID', inplace=True, drop=True) pheno_s =", "'Fried cauliflower', 'Roasted eggplant', 'Baguette', 'Lentil Soup', 'Tzfatit Cheese', 'Nectarine', 'Chicken legs', 'Nuts',", "= 1e-4 df_taxa = np.log10(df_taxa) pheno = pheno.merge(df_taxa, left_index=True, right_index=True) pheno_s = pheno[[c", "Chicken Freq', 'Shish Kebab in Pita Bread Freq', 'Falafel in Pita version 2", "# print sum if np.isnan(pheno.loc[participant,'Age']) or np.isnan(pheno.loc[participant,'Gender']) : print(\"Participant %s, age %s, gender", "beforeNumParticpants-afterNumParticpants>0: pass #print \"Removing participants with missing covars!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\" #print 
beforeNumParticpants-afterNumParticpants #print \"805175 has", "Freq', 'Onion Freq', 'Garlic Freq', 'Vegetable Soup Freq', 'Hummus Salad Freq', 'Tahini Salad", "'Kiwi or Strawberries Freq', 'Mango Freq', 'Peach, Nectarine, Plum Freq', 'Pear Fresh, Cooked", "or Stuffed Cookies, Waffles or Biscuits Freq', 'Simple Cookies or Biscuits Freq', 'Ice", "'Spelled', 'Honey', 'Green beans', 'Milk', 'Peanut Butter', 'Cooked carrots', 'Lemon', 'Salty Cookies', 'Beef',", "pheno=pheno[~pheno.index.isin(sterile_individuals)] if 'keep_household' not in args: #noSharedEnvIID=pandas.read_csv(iidsNoSharedEnv,usecols=[0],header=None,sep='\\t') #pheno=pheno[pheno.index.isin(noSharedEnvIID[0].astype(int).values)] #new code that decides which", "Turkey or Chicken Freq', 'Chicken or Turkey With Skin Freq', 'Chicken or Turkey", "Tuna or Tuna Salad Freq', 'Fish (not Tuna) Pickled, Dried, Smoked, Canned Freq']", "in args, 'dic and no_log are mutually exclusive' pheno[mb_columns] = 10**pheno[mb_columns] if 'all_non_bac'", "Fish Freq', 'Canned Tuna or Tuna Salad Freq', 'Fish (not Tuna) Pickled, Dried,", "\"Removing participants with missing covars!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\" #print beforeNumParticpants-afterNumParticpants #print \"805175 has no FFQ!!!!! 
that", "'glucose', 'ffq', 'antropo', 's_stats_pheno', 'fid', 'keep_household', 'no_log', 'keep_related', 'keep_sterile', '16s', '-9', 'covars_noPCs', 'PCs',", "status, output = subprocess.getstatusoutput(\"cut -f 1 %s -d ' ' | cut -f", "Fruits Freq', 'Fruit Salad Freq'] if 'hunger' in args: mb_columns_extra += ['General Hunger','Morning", "'Coleslaw', 'Americano', 'Pesek Zman snack', 'Green onions', 'Mushrooms', 'Lemon juice', 'Canned Tuna Fish',", "in chip:\" # print set(phenoChip.index.values)-set(phenoAll.index.values) # print len(set(phenoChip.index)-set(phenoAll.index)) print(pheno.columns.values.tolist()) print(pheno.shape) sum=0 for participant", "'Canned Tuna Fish', 'Vegetable Salad', 'Fried eggplant', 'Salmon', 'Cashew', 'Jewish donut', 'Rugelach', 'Cake',", "in args: mb_columns += [c for c in pheno.columns if c[:2]=='PC'] if 'lactose'", "c in pheno.columns if c[:2]=='c_' ] if 'p' in args: mb_columns += [c", "features_to_drop +=['Age','Gender'] if ('include_allPNP' not in args) and ('PCs' not in args): features_to_drop+=['PC1','PC2','PC3','PC4','PC5']", "args: args=list(args)+ffq_args mb_columns_extra=[] if 'activity' in args: mb_columns_extra += ['Work activity','Physical activity -", "participant in pheno[['Age','Gender']].index.values: # if np.isnan(pheno.loc[participant,'Calories_kcal']) or \\ # np.isnan(pheno.loc[participant,'Carbs_g']) or \\ #", "args: status, output = subprocess.getstatusoutput(\"cut -f 1 %s -d ' ' | cut", "= (pheno_g>pheno_g.min().min()).sum(axis=1) pheno.loc[pheno.Hips==-9, 'WHR'] = np.nan pheno.loc[pheno.Waist==-9, 'WHR'] = np.nan pheno['LDLCholesterol'] = pheno['Cholesterol,total']", "or Soft Freq', 'Schnitzel Turkey or Chicken Freq', 'Chicken or Turkey With Skin", "Sweet potato', 'Butter', 'Omelette', 'Coated Wafers', 'Boiled corn', 'Chicken drumstick', 'Pita', 'Pasta Bolognese',", "] if 'g' in args: mb_columns += [c for c in pheno.columns if", "Kef', 'Mustard', 'Coke', 'Vegetable Soup', 'Sausages', 'Pancake', 'Pasta', 'Sauteed 
vegetables', 'Plum', 'Goat Milk", "eggplant', 'Baguette', 'Lentil Soup', 'Tzfatit Cheese', 'Nectarine', 'Chicken legs', 'Nuts', 'Goat Cheese', 'Jam',", "pheno_nodic =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno_nodic.set_index('IID', inplace=True, drop=True) pheno_s = pheno_nodic[[c for c in pheno_nodic.columns if", "not in args): features_to_drop += ['IsGenotek'] if ('covars' not in args) and ('covars_noPCs'", "Chicken Soup Freq', 'Internal Organs Freq', 'Fish Cooked, Baked or Grilled Freq', 'Fried", "('16s' in args): pheno = pheno[[c for c in pheno if c[:2] not", "c[:2]=='o_' ] if 'c' in args: mb_columns += [c for c in pheno.columns", "'Nuts, almonds, pistachios Freq','Peanuts Freq'] if 'vegetables' in args: mb_columns_extra += ['Tomato Freq','Cooked", "Cooked or Canned Freq','Persimmon Freq', 'Watermelon Freq', 'Dried Fruits Freq', 'Fruit Salad Freq']", "['lactose'] if 'blood' in args: # mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', # 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', # 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC',", "Chicken or Turkey Dishes Freq', 'Beef or Chicken Soup Freq', 'Internal Organs Freq',", "pheno.columns if c[:2]=='s_']] pheno_g = pheno[[c for c in pheno.columns if c[:2]=='g_']] ###", "'Dark Chocolate', 'Turkey Shawarma', 'Olive oil', #u'Parmesan\\xc2\\xa0cheese', 'Guacamole', 'Coleslaw', 'Americano', 'Pesek Zman snack',", "mb_columns += taxadf.columns.values.tolist() if 'all_bac' in args: args=list(args)+['s','g','f','o','c','p'] if 's' in args: mb_columns", "Soft Freq', 'Schnitzel Turkey or Chicken Freq', 'Chicken or Turkey With Skin Freq',", "'Sugar', 'Smoked Salmon', 'Melon', 'Roll', 'Whipped cream', 'Coconut milk', 'Pretzels', 'Kohlrabi', 'Eggplant Salad',", "the fly import ForPaper.VertexCut as vc df_household = pandas.read_csv(os.path.join(cleanDataPath, 
'EnvironmentBlock.txt'), delim_whitespace=True) df_household =", "'keep_related', 'keep_sterile', '16s', '-9', 'covars_noPCs', 'PCs', 'lactose','include_allPNP','IsGenotek','permute','meals','other','drugs', 'calories','bloodType','questionnaires','keep_missingCovars','activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions', 'genotek_only', 'swab_only']", "'include_allPNP does not support dicotomize bacteria' if 'IsGenotek' in args: assert 'covars' not", "'Cottage cheese', 'Oil', 'Natural Yogurt', 'Walnuts', 'Edamame', 'Majadra', 'Oatmeal', 'Soy sauce', 'Strawberry', 'Pastrami',", "args: mb_columns += [c for c in pheno.columns if c[:2]=='c_' ] if 'p'", "Freq', 'Black or White Grains, Watermelon Seeds Freq', 'Nuts, almonds, pistachios Freq','Peanuts Freq']", "in args: mb_columns_extra += ['Work activity','Physical activity - mins','Physical activity - freq'] if", "'Omelette', 'Coated Wafers', 'Boiled corn', 'Chicken drumstick', 'Pita', 'Pasta Bolognese', 'Chicken Meatballs', 'Burekas',", "Cream or Popsicle which contains Dairy Freq', 'Popsicle Without Dairy Freq', 'Black or", "pheno_fn_bacDic=os.path.join(cleanDataPath,'dicNoMissingKakiPhenotypesWithCovariates_nodfukim.phenotypes') pheno_fn_bacAllPNP=os.path.join(rawDataPath,'allPNPPhenotypes.phenotypes') iidsNoSharedEnv='/net/mraid08/export/genie/Microbiome/Analyses/PNPChip/cleanData/PNP_autosomal_clean2_nodfukim_NoCouples.txt' PNP_16S_DIR = '/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/16S' glycemicStatusPath='/net/mraid08/export/jafar/Microbiome/Analyses/PNPChip/glycemic_status.csv' def extract(*args,**kwargs): known_args = ['dic', 'all_bac',", "Crackers Freq', 'Small Burekas Freq', 'Jachnun, Mlawah, Kubana, Cigars Freq', 'Pizza Freq'] if", "Steak, Golash Freq', 'Mixed Meat Dishes as Moussaka, Hamin, Cuba Freq', 'Mixed Chicken", "and ('covars' not in args) and 
('covars_noPCs' not in args): features_to_drop += ['IsGenotek']", "'Triglycerides', 'LDLCholesterol'] mb_columns+=s_stats mb_columns=list(set(mb_columns)) if 'fid' in args: mb_columns = ['FID']+mb_columns ########################FFQ START#####################", "'16s' in args if 'dic' in args: pheno =pandas.read_csv(pheno_fn_bacDic,sep='\\t') pheno.set_index('IID', inplace=True, drop=True) pheno_nodic", "Freq', 'Zucchini or Eggplant Freq','Peas, Green Beans or Okra Cooked Freq', 'Cauliflower or", "'Sweet Roll', 'Danish', 'Coffee', 'Pasta Salad', 'Cuba', 'Chicken Liver', 'Sweet Challah', 'Minced meat',", "Cake', 'Doritos', 'Israeli couscous', 'Pistachio', 'Date honey', 'Vinaigrette', 'Bamba', 'Dark Chocolate', 'Turkey Shawarma',", "print pheno.loc[participant,['Calories_kcal','Carbs_g','Fat_g','Protain_g','Protain_g']] # print sum if np.isnan(pheno.loc[participant,'Age']) or np.isnan(pheno.loc[participant,'Gender']) : print(\"Participant %s, age", "] if 'o' in args: mb_columns += [c for c in pheno.columns if", "args, 'IsGenotek and covars are mutually exclusive' if 'otu' in args: assert '16s'", "if 'c' in args: mb_columns += [c for c in pheno.columns if c[:2]=='c_'", "kwargs['taxa']=[initial+kwargs['taxa'][1:] for initial in ('s_', 'g_', 'f_', 'o_', 'c_', 'p_')] elif kwargs['taxa'][1]=='_': kwargs['taxa']=[kwargs['taxa']]", "args, 'include_allPNP does not support dicotomize bacteria' if 'IsGenotek' in args: assert 'covars'", "args: #AddingIrisGlucose df_glucose = pandas.read_csv(glycemicStatusPath).set_index('RegNum') pheno = df_glucose.merge(pheno, left_index=True, right_index=True,how='right') mb_columns +=['median_Without_BMI_ALT_Overall','WakeupGlucose','BMI','VegeterianScale'] pheno.loc[pheno['VegeterianScale']<0,", "if 'keep_household' not in args: #noSharedEnvIID=pandas.read_csv(iidsNoSharedEnv,usecols=[0],header=None,sep='\\t') #pheno=pheno[pheno.index.isin(noSharedEnvIID[0].astype(int).values)] #new code that decides which individuals", "inplace=True, 
drop=True) if 'include_allPNP'in args: status, output = subprocess.getstatusoutput(\"cut -f 1 %s -d", "in pheno[['Age','Gender']].index.values: # if np.isnan(pheno.loc[participant,'Calories_kcal']) or \\ # np.isnan(pheno.loc[participant,'Carbs_g']) or \\ # np.isnan(pheno.loc[participant,'Fat_g'])", "'Marble Cake', 'Brown Rice', 'Cold cut', 'Gilthead Bream', 'Garlic', 'Grapes', 'Chocolate Chip Cookies',", "in args: args=list(args)+['s','g','f','o','c','p'] if 's' in args: mb_columns += [c for c in", "Freq', 'Thousand Island Dressing, Garlic Dressing Freq', 'Honey, Jam, fruit syrup, Maple syrup", "Tomatoes, Tomato Sauce, Tomato Soup Freq', 'Red Pepper Freq', 'Green Pepper Freq', 'Cucumber", "or np.isnan(pheno.loc[participant,'Gender']) : print(\"Participant %s, age %s, gender %s\" %(participant,pheno.loc[participant,'Age'],pheno.loc[participant,'Gender'])) # print pheno[['median_Without_BMI_ALT_Overall']]", "if c[:2]=='g_' ] if 'f' in args: mb_columns += [c for c in", "else: presence=(pheno[mb_columns]>threshold +1e-5).astype(int).sum() presence=presence[presence > len(presence)*ratio].index.values.tolist() pheno=pheno[other_columns+presence] if ('keep_related' not in args): #bed", "['ratio', 'threshold','taxa'] for arg in args: assert arg in known_args, 'unkown arg: %s'%(arg)", "'Diet Soda Freq', 'Regular Sodas with Sugar Freq', 'Decaffeinated Coffee Freq', 'Coffee Freq',", "cheese', 'Oil', 'Natural Yogurt', 'Walnuts', 'Edamame', 'Majadra', 'Oatmeal', 'Soy sauce', 'Strawberry', 'Pastrami', 'Lemonade',", "freq','T2Activity kind', 'Type 2 activity - freq','T3Activity kind','Type 3 activity - freq'] if", "freq','T3Activity kind','Type 3 activity - freq'] if 'bloodType' in args: mb_columns_extra += ['Blood", "['FID']+mb_columns ########################FFQ START##################### if 'questionnaires' in args: args=list(args)+ffq_args mb_columns_extra=[] if 'activity' in args:", "and no_log are mutually exclusive' pheno[mb_columns] = 10**pheno[mb_columns] if 
'all_non_bac' in args: args=list(args)+['covars','blood','glucose','ffq','antropo']", "'Hummus Salad', 'Tilapia', 'Pizza', 'Fried cauliflower', 'Roasted eggplant', 'Baguette', 'Lentil Soup', 'Tzfatit Cheese',", "alpha_diversity_s[alpha_diversity_s < 15].index pheno=pheno[~pheno.index.isin(sterile_individuals)] if 'keep_household' not in args: #noSharedEnvIID=pandas.read_csv(iidsNoSharedEnv,usecols=[0],header=None,sep='\\t') #pheno=pheno[pheno.index.isin(noSharedEnvIID[0].astype(int).values)] #new code", "'D.All', 'D.Psychiatric', 'D.pain', 'D.CVD', 'D.GI','D.Thyroid', 'D.NSAID','D.Contraception'] meals=['Vodka or Arak', 'Avocado', 'Parsley', 'Coated peanuts',", "'Fries Freq', 'Pasta or Flakes Freq'] if 'delivery' in args: mb_columns_extra += ['C-Section','Home", "Yogurt', 'Walnuts', 'Edamame', 'Majadra', 'Oatmeal', 'Soy sauce', 'Strawberry', 'Pastrami', 'Lemonade', 'Pasta with tomato", "= ['activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions'] drug_args=['D.lipid', 'D.All', 'D.Psychiatric', 'D.pain', 'D.CVD', 'D.GI','D.Thyroid', 'D.NSAID','D.Contraception'] meals=['Vodka", "c[:2]=='s_']] pheno_g = pheno[[c for c in pheno.columns if c[:2]=='g_']] ### for c", "Cake', 'Brown Rice', 'Cold cut', 'Gilthead Bream', 'Garlic', 'Grapes', 'Chocolate Chip Cookies', 'Cucumber',", "activity - freq','T3Activity kind','Type 3 activity - freq'] if 'bloodType' in args: mb_columns_extra", "Freq', 'Canned Tuna or Tuna Salad Freq', 'Fish (not Tuna) Pickled, Dried, Smoked,", "donut', 'Rugelach', 'Cake', 'Ravioli', 'Tomatoes', 'Wholemeal Light Bread', 'Marble Cake', 'Brown Rice', 'Cold", "'16s', '-9', 'covars_noPCs', 'PCs', 'lactose','include_allPNP','IsGenotek','permute','meals','other','drugs', 'calories','bloodType','questionnaires','keep_missingCovars','activity','activityTypesFreq','cereals', 
'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions', 'genotek_only', 'swab_only'] ffq_args =", "'Coated peanuts', 'Sugar', 'Smoked Salmon', 'Melon', 'Roll', 'Whipped cream', 'Coconut milk', 'Pretzels', 'Kohlrabi',", "not in kwargs: threshold = -4 else: threshold=kwargs['threshold'] if 'ratio' in kwargs: ratio=kwargs['ratio']", "for Salads or Stews Freq','Mayonnaise Including Light Freq', 'Thousand Island Dressing, Garlic Dressing", "np.isnan(pheno.loc[participant,'Fat_g']) or \\ # np.isnan(pheno.loc[participant,'Protain_g']): # sum+=1 # print participant # print pheno.loc[participant,['Calories_kcal','Carbs_g','Fat_g','Protain_g','Protain_g']]", "if 'bloodType' in args: mb_columns_extra += ['Blood A','Blood B','Blood RH-'] if 'cereals' in", "'Lasagna', 'Popcorn', 'Hummus Salad', 'Tilapia', 'Pizza', 'Fried cauliflower', 'Roasted eggplant', 'Baguette', 'Lentil Soup',", "if 'qualityOfLiving' in args: mb_columns_extra += ['Stress','Sleep quality'] if 'smoking' in args: mb_columns_extra", "#AddingIrisGlucose df_glucose = pandas.read_csv(glycemicStatusPath).set_index('RegNum') pheno = df_glucose.merge(pheno, left_index=True, right_index=True,how='right') mb_columns +=['median_Without_BMI_ALT_Overall','WakeupGlucose','BMI','VegeterianScale'] pheno.loc[pheno['VegeterianScale']<0, 'VegeterianScale']=np.nan", "Freq', 'Pita Freq', 'Saltine Crackers or Matzah Freq', 'Wholemeal Crackers Freq', 'Small Burekas", "'Tofu', 'Wholemeal Pita', 'Sunflower seeds', 'Coriander', 'Ciabatta', 'Tomato sauce', 'Heavy cream', 'Banana', 'Kif", "# print participant # print pheno.loc[participant,['Calories_kcal','Carbs_g','Fat_g','Protain_g','Protain_g']] # print sum if np.isnan(pheno.loc[participant,'Age']) or np.isnan(pheno.loc[participant,'Gender'])", "for taxa_level in ['otu', 'species', 'genus', 'family', 'order', 'class', 'phylum']: df_taxa = 
pandas.read_csv(os.path.join(PNP_16S_DIR,", "Tuna Salad Freq', 'Fish (not Tuna) Pickled, Dried, Smoked, Canned Freq'] if 'pastry'", "df_household.columns if int(c) in pheno.index]] df_household = df_household[df_household.index.isin(pheno.index)] remove_inds = df_household.index[vc.VertexCut().work(df_household.values, 0.5)] pheno=pheno[~pheno.index.isin(remove_inds)]", "pheno.columns if c[:2]=='PC'] if 'lactose' in args: mb_columns += ['lactose'] if 'blood' in", "addition for Salads or Stews Freq','Mayonnaise Including Light Freq', 'Thousand Island Dressing, Garlic", "] if 'no_log' in args: assert 'dic' not in args, 'dic and no_log", "+= ['Oil as an addition for Salads or Stews Freq','Mayonnaise Including Light Freq',", "'Sweet Dry Wine, Cocktails Freq', 'Alcoholic Drinks Freq'] if 'fruits' in args: mb_columns_extra", "pheno.set_index('IID', inplace=True, drop=True) if 'include_allPNP'in args: status, output = subprocess.getstatusoutput(\"cut -f 1 %s", "'Rice Freq','Couscous, Burgul, Mamaliga, Groats Freq', 'Potatoes Boiled, Baked, Mashed, Potatoes Salad Freq',", "'Bissli', 'Pullet', 'Hummus', 'Chinese Chicken Noodles', 'Shakshouka', 'Tahini', 'Chicken breast', 'Steak', 'Light Bread',", "args: mb_columns += ['lactose'] if 'blood' in args: # mb_columns += ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', #", "potato', 'Wine', 'Cookies', 'Challah', 'Spelled', 'Honey', 'Green beans', 'Milk', 'Peanut Butter', 'Cooked carrots',", "mb_columns_extra += ['C-Section','Home delivery','Was breastfed'] if 'dressSweetners' in args: mb_columns_extra += ['Oil as", "'Orange or Grapefruit Freq', 'Orange or Grapefruit Juice Freq', 'Apple Freq', 'Apricot Fresh", "'Persimmon', 'Apple juice', 'Stuffed Peppers', 'Egg', 'Pear', 'Peas', 'Pecan', 'Cooked cauliflower', 'Cooked Sweet", "zucchini', 'Sweet potato', 'Wine', 'Cookies', 'Challah', 'Spelled', 'Honey', 'Green beans', 'Milk', 'Peanut Butter',", "Kebab in Pita Bread Freq', 'Falafel in Pita 
version 2 Freq','Processed Meat Products", "'s', 'g','f','o','c','p','otu', 'all_non_bac', 'covars', 'blood', 'glucose', 'ffq', 'antropo', 's_stats_pheno', 'fid', 'keep_household', 'no_log', 'keep_related',", "'s_', 'k_', 'p_', 'f_']): print c alpha_diversity_s = (pheno_s>pheno_s.min().min()).sum(axis=1) alpha_diversity_g = (pheno_g>pheno_g.min().min()).sum(axis=1) pheno.loc[pheno.Hips==-9,", "= pandas.read_csv(os.path.join(PNP_16S_DIR, taxa_level+'.txt'), sep='\\t', index_col=0) df_taxa[df_taxa<1e-3] = 1e-4 df_taxa = np.log10(df_taxa) pheno =", "-f 1 -d '_'\"%os.path.join(rawDataPath,'tmp','dfukim.txt')) pheno =pheno[~pheno.index.isin([int(dafook) for dafook in output.split('\\n')])] if ('16s' in", "'Smoked Salmon', 'Melon', 'Roll', 'Whipped cream', 'Coconut milk', 'Pretzels', 'Kohlrabi', 'Eggplant Salad', 'Cooked", "Cookies', 'Beef', 'Meatballs', 'Hamburger sandwich', 'Chicken thighs', 'Granola', 'Beet', 'Couscous', 'Beet Salad', 'Chocolate", "np.isnan(pheno.loc[participant,'Protain_g']): # sum+=1 # print participant # print pheno.loc[participant,['Calories_kcal','Carbs_g','Fat_g','Protain_g','Protain_g']] # print sum if", "if 'pastry' in args: mb_columns_extra += ['Ordinary Bread or Challah Freq', 'Light Bread", "'taxa is mutual exclusive with all_bac,s,g,f,o,c,p,otu' if 'include_allPNP' in args: assert 'dic' not", "alpha_diversity_s = (pheno_s>pheno_s.min().min()).sum(axis=1) alpha_diversity_g = (pheno_g>pheno_g.min().min()).sum(axis=1) pheno.loc[pheno.Hips==-9, 'WHR'] = np.nan pheno.loc[pheno.Waist==-9, 'WHR'] =", "Dairy Freq', 'Black or White Grains, Watermelon Seeds Freq', 'Nuts, almonds, pistachios Freq','Peanuts", "c in pheno.columns if c[:2]=='g_' ] if 'f' in args: mb_columns += [c", "Skin Freq', 'Chicken or Turkey Without Skin Freq', 'Sausages Freq', 'Sausages such as", "pheno.loc[:,mealsColumns]=pheno[mealsColumns][pheno[mealsColumns]!=-9].div(pheno['Calories_kcal_Total'].values,axis=0) pheno.replace(np.nan, 0,inplace=True) mb_columns += mealsColumns 
########################FFQ END##################### #for c in pheno: print", "'Turkey Shawarma', 'Olive oil', #u'Parmesan\\xc2\\xa0cheese', 'Guacamole', 'Coleslaw', 'Americano', 'Pesek Zman snack', 'Green onions',", "Green Onion Freq', 'Fresh Vegetable Salad Without Dressing or Oil Freq', 'Fresh Vegetable", "mb_columns_extra += ['Cornflakes Freq','Granola or Bernflaks Freq','Cooked Cereal such as Oatmeal Porridge Freq',", "fat Milk', 'Pickled cucumber', 'Soymilk', 'Dates', 'Croissant', 'Biscuit', 'Potato chips', 'White Cheese', 'French", "ratio=kwargs['ratio'] mb_columns = [c for c in pheno.columns if c[:2] in ['s_','g_','f_','o_','c_','p_']] other_columns", "args: pheno = pheno.loc[pheno['IsGenotek']==1] if 'swab_only' in args: pheno = pheno.loc[pheno['IsGenotek']==0] mb_columns =", "Juice Freq', 'Apple Freq', 'Apricot Fresh or Dry, or Loquat Freq', 'Grapes or", "in args: mb_columns_extra += ['Is pregnant','Is breastfeeding','Is after birth', 'Taking contraceptives', 'Regular period',", "in args: pheno =pandas.read_csv(pheno_fn_bacDic,sep='\\t') pheno.set_index('IID', inplace=True, drop=True) pheno_nodic =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno_nodic.set_index('IID', inplace=True, drop=True) pheno_s", "if 'threshold' not in kwargs: threshold = -4 else: threshold=kwargs['threshold'] if 'ratio' in", "'Banana Freq', 'Melon Freq', 'Kiwi or Strawberries Freq', 'Mango Freq', 'Peach, Nectarine, Plum", "'Lemonade', 'Pasta with tomato sauce', 'Chicken']#removed: u'Soda water',u'Water', u'Salt', known_args+= ffq_args known_args+= drug_args", "Noodles', 'Lentils', 'Mutton', 'Wholemeal Noodles', 'Green Tea', 'Schnitzel', 'Brown Sugar', 'Peanuts', 'Mayonnaise', 'Persimmon',", "Freq','Carrots, Fresh or Cooked, Carrot Juice Freq', 'Corn Freq', 'Parsley, Celery, Fennel, Dill,", "'Chicken or Turkey Without Skin Freq', 'Sausages Freq', 'Sausages such as Salami Freq',", "mb_columns_extra=[] if 'activity' in args: mb_columns_extra += ['Work activity','Physical activity - 
mins','Physical activity", "=pandas.read_csv(pheno_fn_bac,sep='\\t') pheno_nodic.set_index('IID', inplace=True, drop=True) pheno_s = pheno_nodic[[c for c in pheno_nodic.columns if c[:2]=='s_']]", "or Bageles Freq', 'Pita Freq', 'Saltine Crackers or Matzah Freq', 'Wholemeal Crackers Freq',", "'D.CVD', 'D.GI','D.Thyroid', 'D.NSAID','D.Contraception'] meals=['Vodka or Arak', 'Avocado', 'Parsley', 'Coated peanuts', 'Sugar', 'Smoked Salmon',", "mutual exclusive with all_bac,s,g,f,o,c,p,otu' if 'include_allPNP' in args: assert 'dic' not in args,", "Salad Freq', 'Tahini Salad Freq', 'Cooked Vegetable Salads Freq', 'Pickled Vegetables Freq', 'Olives", "known_args = ['dic', 'all_bac', 's', 'g','f','o','c','p','otu', 'all_non_bac', 'covars', 'blood', 'glucose', 'ffq', 'antropo', 's_stats_pheno',", "or Strawberries Freq', 'Mango Freq', 'Peach, Nectarine, Plum Freq', 'Pear Fresh, Cooked or", "covars are mutually exclusive' if 'otu' in args: assert '16s' in args if", "print(phenoChip.shape) print(phenoChip.columns) # print \"Only in chip:\" # print set(phenoChip.index.values)-set(phenoAll.index.values) # print len(set(phenoChip.index)-set(phenoAll.index))", "args: sterile_individuals = alpha_diversity_g[alpha_diversity_g < 4].index else: sterile_individuals = alpha_diversity_s[alpha_diversity_s < 15].index pheno=pheno[~pheno.index.isin(sterile_individuals)]", "df_household[[c for c in df_household.columns if int(c) in pheno.index]] df_household = df_household[df_household.index.isin(pheno.index)] remove_inds", "'glucose' in args: mb_columns += ['95P_Glucose','Glucose_Noise','Max_Glucose','Median_Glucose','WakeupGlucose', 'MeanGlucoseResponse','MeanBreadResponse','MeanBreadButterResponse'] if 'ffq' in args: mb_columns +=", "'Nuts', 'Goat Cheese', 'Jam', 'Feta Cheese', 'Mandarin', 'Pesto', 'Sugar substitute', 'Cheesecake', 'Raisins', 'Chocolate',", "in pheno.columns if c[:2] not in ['s_','g_','f_','o_','c_','p_']] if 'dic' in args: presence=((pheno[mb_columns]>threshold 
+1e-5)&(pheno[mb_columns]!=0)).astype(int).sum()", "taxa in kwargs['taxa']: taxadf=pheno.filter(regex=(taxa)) mb_columns += taxadf.columns.values.tolist() if 'all_bac' in args: args=list(args)+['s','g','f','o','c','p'] if", "args, 'dic and no_log are mutually exclusive' pheno[mb_columns] = 10**pheno[mb_columns] if 'all_non_bac' in", "participant 86356,762339,805175 have no 'Age','Gender' # if set(['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'])<=set(pheno.columns.values): keep_inds=pheno.loc[:,['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek']].replace(-9, np.nan).dropna().index.values beforeNumParticpants=pheno.shape[0] pheno=pheno.loc[keep_inds] afterNumParticpants=pheno.shape[0]", "print(phenoChip.columns) # print \"Only in chip:\" # print set(phenoChip.index.values)-set(phenoAll.index.values) # print len(set(phenoChip.index)-set(phenoAll.index)) print(pheno.columns.values.tolist())", "Dried, Smoked, Canned Freq'] if 'pastry' in args: mb_columns_extra += ['Ordinary Bread or", "in args): pheno.replace(-9, np.nan, inplace=True) if 'permute' in args: pheno=pandas.DataFrame(pheno.values[np.random.permutation(pheno.shape[0])],index=pheno.index,columns=pheno.columns) return pheno if", "Turkey Dishes Freq', 'Beef or Chicken Soup Freq', 'Internal Organs Freq', 'Fish Cooked,", "Salad Freq', 'Fish (not Tuna) Pickled, Dried, Smoked, Canned Freq'] if 'pastry' in", "if c[:2]=='g_']] ### for c in pheno: ### if (c[:2] not in ['c_',", "inplace=True, drop=True) pheno_nodic =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno_nodic.set_index('IID', inplace=True, drop=True) pheno_s = pheno_nodic[[c for c in", "'Orange', 'Rice', 'Diet Fruit Drink', 'Corn schnitzel', 'Cappuccino', 'Low fat Milk', 'Pickled cucumber',", "> len(presence)*ratio].index.values.tolist() pheno=pheno[other_columns+presence] if ('keep_related' not in args): #bed = Bed(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim_norelated'), count_A1=True)#.read()", 
"'Bamba', 'Dark Chocolate', 'Turkey Shawarma', 'Olive oil', #u'Parmesan\\xc2\\xa0cheese', 'Guacamole', 'Coleslaw', 'Americano', 'Pesek Zman", "'PCs', 'lactose','include_allPNP','IsGenotek','permute','meals','other','drugs', 'calories','bloodType','questionnaires','keep_missingCovars','activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions', 'genotek_only', 'swab_only'] ffq_args = ['activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving',", "'activityTypesFreq' in args: mb_columns_extra += ['T1Activity kind','Type 1 activity - freq','T2Activity kind', 'Type", "Fruit Drink', 'Corn schnitzel', 'Cappuccino', 'Low fat Milk', 'Pickled cucumber', 'Soymilk', 'Dates', 'Croissant',", "c in pheno.columns if c[:2] in ['s_','g_','f_','o_','c_','p_']] other_columns = [c for c in", "set(['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'])<=set(pheno.columns.values): keep_inds=pheno.loc[:,['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek']].replace(-9, np.nan).dropna().index.values beforeNumParticpants=pheno.shape[0] pheno=pheno.loc[keep_inds] afterNumParticpants=pheno.shape[0] if beforeNumParticpants-afterNumParticpants>0: pass #print \"Removing participants with", "Without Skin Freq', 'Sausages Freq', 'Sausages such as Salami Freq', 'Pastrami or Smoked", "'Oil', 'Natural Yogurt', 'Walnuts', 'Edamame', 'Majadra', 'Oatmeal', 'Soy sauce', 'Strawberry', 'Pastrami', 'Lemonade', 'Pasta", "'f_']): print c alpha_diversity_s = (pheno_s>pheno_s.min().min()).sum(axis=1) alpha_diversity_g = (pheno_g>pheno_g.min().min()).sum(axis=1) pheno.loc[pheno.Hips==-9, 'WHR'] = np.nan", "extract('s','include_allPNP') print(phenoAll.shape) print(phenoAll.columns) phenoChip = extract('keep_household','s','keep_related') 
print(phenoChip.shape) print(phenoChip.columns) # print \"Only in chip:\"", "= alpha_diversity_g[alpha_diversity_g < 4].index else: sterile_individuals = alpha_diversity_s[alpha_diversity_s < 15].index pheno=pheno[~pheno.index.isin(sterile_individuals)] if 'keep_household'", "'Banana', 'Kif Kef', 'Mustard', 'Coke', 'Vegetable Soup', 'Sausages', 'Pancake', 'Pasta', 'Sauteed vegetables', 'Plum',", "'include_allPNP' in args: assert 'dic' not in args, 'include_allPNP does not support dicotomize", "or Stews Freq','Mayonnaise Including Light Freq', 'Thousand Island Dressing, Garlic Dressing Freq', 'Honey,", "= df_household[df_household.index.isin(pheno.index)] remove_inds = df_household.index[vc.VertexCut().work(df_household.values, 0.5)] pheno=pheno[~pheno.index.isin(remove_inds)] if 'keep_missingCovars' not in args: #One", "broccoli', 'Beef Cholent', 'Cracker', 'Chocolate Cookies', 'White beans', 'Cooked zucchini', 'Sweet potato', 'Wine',", "#One participant 244624 has no 'Calories_kcal','Carbs_g','Fat_g','Protain_g' #3 participant 86356,762339,805175 have no 'Age','Gender' #", "Freq'] if 'fruits' in args: mb_columns_extra += ['Mandarin or Clementine Freq', 'Orange or", "Cheese', 'French fries', 'Wholemeal Bread', 'Tuna Salad', 'Chocolate spread', 'Kebab', 'Rice crackers', 'Wafers',", "if 'taxa' in kwargs: if kwargs['taxa'][0]=='*': kwargs['taxa']=[initial+kwargs['taxa'][1:] for initial in ('s_', 'g_', 'f_',", "Freq', 'White or Brown Sugar Freq', 'Artificial Sweeteners Freq',] if 'drinks' in args:", "import pandas import os import numpy as np import sys from pysnptools.snpreader.bed import", "if c[:2] not in ('s_', 'g_', 'f_', 'o_', 'c_', 'p_')]] for taxa_level in", "'sweets' in args: mb_columns_extra += ['Milk or Dark Chocolate Freq', 'Salty Snacks Freq',", "Products Freq','Beef, Veal, Lamb, Pork, Steak, Golash Freq', 'Mixed Meat Dishes as Moussaka,", "'Tilapia', 'Pizza', 'Fried cauliflower', 'Roasted eggplant', 'Baguette', 'Lentil Soup', 'Tzfatit Cheese', 
'Nectarine', 'Chicken", "else: features_to_drop +=['Age','Gender'] if ('include_allPNP' not in args) and ('PCs' not in args):", "'Pecan', 'Cooked cauliflower', 'Cooked Sweet potato', 'Butter', 'Omelette', 'Coated Wafers', 'Boiled corn', 'Chicken", "if 'ffq' in args: mb_columns += ['Alcoholic_Drinks_Freq','Cigarretes_per_day','Coffee_Freq','Start_smoking_age'] if 'antropo' in args: mb_columns +=", "'pastry' in args: mb_columns_extra += ['Ordinary Bread or Challah Freq', 'Light Bread Freq',", "in pheno.columns if c[:2]=='p_' ] if 'otu' in args: mb_columns += [c for", "assert 'dic' not in args, '16s and dic are mutually exclusive' if ('taxa'", "mb_columns += [arg] mb_columns_extra=[val.replace(' ','_') for val in mb_columns_extra] mb_columns+=mb_columns_extra if 'meals' in", "['activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions'] drug_args=['D.lipid', 'D.All', 'D.Psychiatric', 'D.pain', 'D.CVD', 'D.GI','D.Thyroid', 'D.NSAID','D.Contraception'] meals=['Vodka or", "args: mb_columns_extra += ['Blood A','Blood B','Blood RH-'] if 'cereals' in args: mb_columns_extra +=", "args: args=list(args)+['s','g','f','o','c','p'] if 's' in args: mb_columns += [c for c in pheno.columns", "mb_columns_extra += ['Work activity','Physical activity - mins','Physical activity - freq'] if 'activityTypesFreq' in", "participants with missing covars!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\" #print beforeNumParticpants-afterNumParticpants #print \"805175 has no FFQ!!!!! 
that is", "'dic' not in args, 'include_allPNP does not support dicotomize bacteria' if 'IsGenotek' in", "'legumes' in args: mb_columns_extra += ['Falafel in Pita Bread Freq', 'Cooked Legumes Freq',", "breastfed'] if 'dressSweetners' in args: mb_columns_extra += ['Oil as an addition for Salads", "'-9', 'covars_noPCs', 'PCs', 'lactose','include_allPNP','IsGenotek','permute','meals','other','drugs', 'calories','bloodType','questionnaires','keep_missingCovars','activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions', 'genotek_only', 'swab_only'] ffq_args = ['activity','activityTypesFreq','cereals',", "pheno['HDLCholesterol'] - 2*pheno['Triglycerides'] if 'genotek_only' in args: pheno = pheno.loc[pheno['IsGenotek']==1] if 'swab_only' in", "Freq', 'Processed Meat Free Products Freq'] if 'meatProducts' in args: mb_columns_extra += ['Egg", "'Sausages such as Salami Freq', 'Pastrami or Smoked Turkey Breast Freq', 'Turkey Meatballs,", "mb_columns += [c for c in pheno.columns if c[:2]=='s_' ] if 'g' in", "for c in pheno: ### if (c[:2] not in ['c_', 'g_', 'o_', 's_',", "pheno.columns if c[:4]=='OTU_' ] if 'no_log' in args: assert 'dic' not in args,", "Celery, Fennel, Dill, Cilantro, Green Onion Freq', 'Fresh Vegetable Salad Without Dressing or", "Dairy Freq', 'Popsicle Without Dairy Freq', 'Black or White Grains, Watermelon Seeds Freq',", "Arak', 'Avocado', 'Parsley', 'Coated peanuts', 'Sugar', 'Smoked Salmon', 'Melon', 'Roll', 'Whipped cream', 'Coconut", "sterile_individuals = alpha_diversity_s[alpha_diversity_s < 15].index pheno=pheno[~pheno.index.isin(sterile_individuals)] if 'keep_household' not in args: #noSharedEnvIID=pandas.read_csv(iidsNoSharedEnv,usecols=[0],header=None,sep='\\t') #pheno=pheno[pheno.index.isin(noSharedEnvIID[0].astype(int).values)]", "if c[:2]=='PC'] if 'lactose' in args: mb_columns += ['lactose'] if 
'blood' in args:", "+= ['Currently smokes','Ever smoked'] if 'sweets' in args: mb_columns_extra += ['Milk or Dark", "in kwargs: if kwargs['taxa'][0]=='*': kwargs['taxa']=[initial+kwargs['taxa'][1:] for initial in ('s_', 'g_', 'f_', 'o_', 'c_',", "pheno =pandas.read_csv(pheno_fn_bac,sep='\\t') pheno.set_index('IID', inplace=True, drop=True) if 'include_allPNP'in args: status, output = subprocess.getstatusoutput(\"cut -f", "Freq', 'Garlic Freq', 'Vegetable Soup Freq', 'Hummus Salad Freq', 'Tahini Salad Freq', 'Cooked", "in ('s_', 'g_', 'f_', 'o_', 'c_', 'p_')] elif kwargs['taxa'][1]=='_': kwargs['taxa']=[kwargs['taxa']] for taxa in", "Cheese', 'Mandarin', 'Pesto', 'Sugar substitute', 'Cheesecake', 'Raisins', 'Chocolate', 'Quinoa', 'Cooked broccoli', 'Beef Cholent',", "1 activity - freq','T2Activity kind', 'Type 2 activity - freq','T3Activity kind','Type 3 activity", "in ('s_', 'g_', 'f_', 'o_', 'c_', 'p_')]] for taxa_level in ['otu', 'species', 'genus',", "Freq', 'Cake, Torte Cakes, Chocolate Cake Freq', 'Fruit Pie or Cake Freq', 'Coated", "'Jewish donut', 'Rugelach', 'Cake', 'Ravioli', 'Tomatoes', 'Wholemeal Light Bread', 'Marble Cake', 'Brown Rice',", "'Pasta with tomato sauce', 'Chicken']#removed: u'Soda water',u'Water', u'Salt', known_args+= ffq_args known_args+= drug_args known_kwargs", "'Hamburger', 'Dark Beer', 'Cooked beets', 'Almonds', 'Falafel', 'Noodles', 'Jachnun', 'Turkey', 'Sushi', 'Brazil nuts',", "Sprouts, Green or Red Cabbage Freq', 'Lettuce Freq','Carrots, Fresh or Cooked, Carrot Juice", "'Apricot Fresh or Dry, or Loquat Freq', 'Grapes or Raisins Freq', 'Banana Freq',", "['95P_Glucose','Glucose_Noise','Max_Glucose','Median_Glucose','WakeupGlucose', 'MeanGlucoseResponse','MeanBreadResponse','MeanBreadButterResponse'] if 'ffq' in args: mb_columns += ['Alcoholic_Drinks_Freq','Cigarretes_per_day','Coffee_Freq','Start_smoking_age'] if 'antropo' in args:", "pheno = df_glucose.merge(pheno, left_index=True, right_index=True,how='right') mb_columns 
+=['median_Without_BMI_ALT_Overall','WakeupGlucose','BMI','VegeterianScale'] pheno.loc[pheno['VegeterianScale']<0, 'VegeterianScale']=np.nan if 'drugs' in args:", "return pheno if __name__==\"__main__\": # pheno=extract('dic','covars','keep_household',\"pastry\",ratio=0.2)#'all_bac' phenoAll = extract('s','include_allPNP','covars')#'include_allPNP','keep_household','ffq','keep_related')#'include_allPNP', print(phenoAll.shape) print(phenoAll.columns) phenoAll =", "'Chicken legs', 'Nuts', 'Goat Cheese', 'Jam', 'Feta Cheese', 'Mandarin', 'Pesto', 'Sugar substitute', 'Cheesecake',", "'g','f','o','c','p','otu']).intersection(set(args)))==0, \\ 'taxa is mutual exclusive with all_bac,s,g,f,o,c,p,otu' if 'include_allPNP' in args: assert", "Meat Dishes as Moussaka, Hamin, Cuba Freq', 'Mixed Chicken or Turkey Dishes Freq',", "'Wholemeal Crackers', 'Sugar Free Gum', 'Hamburger', 'Dark Beer', 'Cooked beets', 'Almonds', 'Falafel', 'Noodles',", "'Grilled cheese', 'Bissli', 'Pullet', 'Hummus', 'Chinese Chicken Noodles', 'Shakshouka', 'Tahini', 'Chicken breast', 'Steak',", "or Biscuits Freq', 'Simple Cookies or Biscuits Freq', 'Ice Cream or Popsicle which", "'Egg, Hard Boiled or Soft Freq', 'Schnitzel Turkey or Chicken Freq', 'Chicken or", "('covars_noPCs' not in args) and ('other' not in args): if 'calories' not in", "mb_columns_extra=[val.replace(' ','_') for val in mb_columns_extra] mb_columns+=mb_columns_extra if 'meals' in args: mealsColumns=[val.replace(' ','_')", "Roll', 'Danish', 'Coffee', 'Pasta Salad', 'Cuba', 'Chicken Liver', 'Sweet Challah', 'Minced meat', 'Chocolate", "df_household = pandas.read_csv(os.path.join(cleanDataPath, 'EnvironmentBlock.txt'), delim_whitespace=True) df_household = df_household[[c for c in df_household.columns if", "pheno = pheno.merge(df_taxa, left_index=True, right_index=True) pheno_s = pheno[[c for c in pheno.columns if", "other_columns = [c for c in pheno.columns if c[:2] not in ['s_','g_','f_','o_','c_','p_']] if", 
"pheno=pheno.loc[keep_inds] afterNumParticpants=pheno.shape[0] if beforeNumParticpants-afterNumParticpants>0: pass #print \"Removing participants with missing covars!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\" #print beforeNumParticpants-afterNumParticpants", "# pheno=extract('dic','covars','keep_household',\"pastry\",ratio=0.2)#'all_bac' phenoAll = extract('s','include_allPNP','covars')#'include_allPNP','keep_household','ffq','keep_related')#'include_allPNP', print(phenoAll.shape) print(phenoAll.columns) phenoAll = extract('s','include_allPNP') print(phenoAll.shape) print(phenoAll.columns) phenoChip", "print(phenoAll.shape) print(phenoAll.columns) phenoAll = extract('s','include_allPNP') print(phenoAll.shape) print(phenoAll.columns) phenoChip = extract('keep_household','s','keep_related') print(phenoChip.shape) print(phenoChip.columns) #", "'Pita Freq', 'Saltine Crackers or Matzah Freq', 'Wholemeal Crackers Freq', 'Small Burekas Freq',", "birth', 'Taking contraceptives', 'Regular period', 'Irregular period', 'No period','Hormonal replacment', 'Past breastfeeding'] if", "Dry Wine, Cocktails Freq', 'Alcoholic Drinks Freq'] if 'fruits' in args: mb_columns_extra +=", "args: mb_columns_extra += ['Ordinary Bread or Challah Freq', 'Light Bread Freq', 'Wholemeal or", "Cake Freq', 'Fruit Pie or Cake Freq', 'Coated or Stuffed Cookies, Waffles or", "kwargs['taxa']=[kwargs['taxa']] for taxa in kwargs['taxa']: taxadf=pheno.filter(regex=(taxa)) mb_columns += taxadf.columns.values.tolist() if 'all_bac' in args:", "'smoking','sweets','vegetables','womenOnlyQuestions', 'genotek_only', 'swab_only'] ffq_args = ['activity','activityTypesFreq','cereals', 'delivery','dressSweetners','drinks','fruits','hunger', 'legumes','meatProducts','pastry','qualityOfLiving', 'smoking','sweets','vegetables','womenOnlyQuestions'] drug_args=['D.lipid', 'D.All', 'D.Psychiatric', 'D.pain',", "'Plum', 'Goat Milk Yogurt', 'Orange juice', 'Potatoes', 'Halva', 'Yellow pepper', 'Mango', 'Lasagna', 'Popcorn',", 
"'Fried onions', 'Ice cream', 'Cream Cake', 'Green cabbage', 'Olives', 'Balsamic vinegar', 'Peach', 'Light", "Freq', 'Red Pepper Freq', 'Green Pepper Freq', 'Cucumber Freq', 'Zucchini or Eggplant Freq','Peas,", "total calories pheno.loc[:,mealsColumns]=pheno[mealsColumns][pheno[mealsColumns]!=-9].div(pheno['Calories_kcal_Total'].values,axis=0) pheno.replace(np.nan, 0,inplace=True) mb_columns += mealsColumns ########################FFQ END##################### #for c in", "Potato Freq', 'Brussels Sprouts, Green or Red Cabbage Freq', 'Lettuce Freq','Carrots, Fresh or", "Fresh or Cooked, Carrot Juice Freq', 'Corn Freq', 'Parsley, Celery, Fennel, Dill, Cilantro,", "mb_columns_extra += ['General Hunger','Morning Hunger', 'Midday Hunger', 'Evening Hunger'] if 'legumes' in args:", "'o' in args: mb_columns += [c for c in pheno.columns if c[:2]=='o_' ]", "in args: mb_columns_extra += ['Nectar, Cider Freq', 'Diet Juice Freq', 'Juice Freq', 'Diet", "'Mixed Meat Dishes as Moussaka, Hamin, Cuba Freq', 'Mixed Chicken or Turkey Dishes", "if 'smoking' in args: mb_columns_extra += ['Currently smokes','Ever smoked'] if 'sweets' in args:", "Chocolate Cake Freq', 'Fruit Pie or Cake Freq', 'Coated or Stuffed Cookies, Waffles", "sterile_individuals = alpha_diversity_g[alpha_diversity_g < 4].index else: sterile_individuals = alpha_diversity_s[alpha_diversity_s < 15].index pheno=pheno[~pheno.index.isin(sterile_individuals)] if", "arg in args: assert arg in known_args, 'unkown arg: %s'%(arg) for kwarg in", "no 'Age','Gender' # if set(['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek'])<=set(pheno.columns.values): keep_inds=pheno.loc[:,['Age','Gender','Calories_kcal','Carbs_g','Fat_g','Protain_g','IsGenotek']].replace(-9, np.nan).dropna().index.values beforeNumParticpants=pheno.shape[0] pheno=pheno.loc[keep_inds] afterNumParticpants=pheno.shape[0] if beforeNumParticpants-afterNumParticpants>0: pass", "'Yellow pepper', 'Mango', 'Lasagna', 'Popcorn', 'Hummus Salad', 
'Tilapia', 'Pizza', 'Fried cauliflower', 'Roasted eggplant',", "args: mb_columns_extra += ['Mandarin or Clementine Freq', 'Orange or Grapefruit Freq', 'Orange or", "# print \"Only in chip:\" # print set(phenoChip.index.values)-set(phenoAll.index.values) # print len(set(phenoChip.index)-set(phenoAll.index)) print(pheno.columns.values.tolist()) print(pheno.shape)", "'Pullet', 'Hummus', 'Chinese Chicken Noodles', 'Shakshouka', 'Tahini', 'Chicken breast', 'Steak', 'Light Bread', 'Wholemeal", "'Sweet Challah', 'Minced meat', 'Chocolate cake', 'Diet Coke', 'Dried dates', 'Carrot Cake', 'Doritos',", "+= ['ALT','Albumin','AST','Basophils%','Calcium','Chloride','Cholesterol,total','Creatinine', 'CRP(WIDERANGE)','CRPhs','Eosinophils%','HCT','HDLCholesterol','Hemoglobin','HbA1C%','Lymphocytes%', 'MCH','MCHC','MCV','MPV','Monocytes%','Neutrophils%','Phosphorus','Platelets','Potassium','RBC', 'RDW','Sodium','TSH','WBC','LDLCholesterol'] if 'glucose' in args: mb_columns += ['95P_Glucose','Glucose_Noise','Max_Glucose','Median_Glucose','WakeupGlucose', 'MeanGlucoseResponse','MeanBreadResponse','MeanBreadButterResponse']", "thighs', 'Granola', 'Beet', 'Couscous', 'Beet Salad', 'Chocolate Mousse Cake', 'Sweet Roll', 'Danish', 'Coffee',", "Snacks Freq', 'Cheese Cakes or Cream Cakes Freq', 'Yeast Cakes and Cookies as", "Pepper Freq', 'Green Pepper Freq', 'Cucumber Freq', 'Zucchini or Eggplant Freq','Peas, Green Beans", "mb_columns_extra += ['Stress','Sleep quality'] if 'smoking' in args: mb_columns_extra += ['Currently smokes','Ever smoked']", "Bread Freq', 'Falafel in Pita version 2 Freq','Processed Meat Products Freq','Beef, Veal, Lamb,", "Freq', 'Melon Freq', 'Kiwi or Strawberries Freq', 'Mango Freq', 'Peach, Nectarine, Plum Freq',", "in args: mb_columns += [c for c in pheno.columns if c[:2]=='p_' ] if", "Freq','Couscous, Burgul, Mamaliga, Groats Freq', 'Potatoes Boiled, Baked, Mashed, Potatoes Salad Freq', 'Fries", "'White beans', 'Cooked zucchini', 'Sweet potato', 'Wine', 
'Cookies', 'Challah', 'Spelled', 'Honey', 'Green beans',", "c in pheno.columns if c[:2]=='PC'] if 'lactose' in args: mb_columns += ['lactose'] if", "+= ['Blood A','Blood B','Blood RH-'] if 'cereals' in args: mb_columns_extra += ['Cornflakes Freq','Granola", "'Wholemeal Crackers Freq', 'Small Burekas Freq', 'Jachnun, Mlawah, Kubana, Cigars Freq', 'Pizza Freq']", "Zman snack', 'Green onions', 'Mushrooms', 'Lemon juice', 'Canned Tuna Fish', 'Vegetable Salad', 'Fried", "'g','f','o','c','p','otu', 'all_non_bac', 'covars', 'blood', 'glucose', 'ffq', 'antropo', 's_stats_pheno', 'fid', 'keep_household', 'no_log', 'keep_related', 'keep_sterile',", "= pandas.read_csv(os.path.join(cleanDataPath, 'PNP_autosomal_clean2_nodfukim.fam'), delim_whitespace=True, index_col=0, header=None) df_related=df_fam[~df_fam.index.isin(df_fam_no_related.index)] pheno=pheno[(~pheno.index.isin(df_related.index))] if ('keep_sterile') not in args:", "'Lemon', 'Salty Cookies', 'Beef', 'Meatballs', 'Hamburger sandwich', 'Chicken thighs', 'Granola', 'Beet', 'Couscous', 'Beet", "'Sugar substitute', 'Cheesecake', 'Raisins', 'Chocolate', 'Quinoa', 'Cooked broccoli', 'Beef Cholent', 'Cracker', 'Chocolate Cookies',", "Golash Freq', 'Mixed Meat Dishes as Moussaka, Hamin, Cuba Freq', 'Mixed Chicken or" ]
[ "regex_pattern[3].strip()) new_evidence_text = re.sub(r\"\\s{2,}\", \" \", new_evidence_text) new_evidence_text = re.sub(r'(\\\\)(\\w)', r'\\g<2>', new_evidence_text) new_evidence_text", "elif result['errors']: if not reports: logger.info('\\n'.join([x.to_string() for x in result['errors']]) + \"\\n\") else:", "== np.str: df.hint = df.hint.str.replace(r'\\|', '&#124;') if df.entry.dtype == np.str: df.entry = df.entry.str.replace(r'\\|',", "np.str: df.entry = df.entry.str.replace(r'\\|', '&#124;') df.url = [(\"[url](\" + str(x) + \")\" if", "2.0, and 2.1. Defaults to the most recent version. tree: bool Generates a", "if report.endswith('.txt'): open(report, \"w\").write(df.to_string()) if report.endswith('.html'): df.to_html(report) if report.endswith('.md'): cols = df.columns df2", "(default). json_file: bool If True, generates a JSON file that can be used", "to True. force_json: bool If True, will create an importable JSON file even", "\", regex_pattern[3].strip()) new_evidence_text = re.sub(r\"\\s{2,}\", \" \", new_evidence_text) new_evidence_text = re.sub(r'(\\\\)(\\w)', r'\\g<2>', new_evidence_text)", "re.findall(r'\\n((SET\\s+(DOCUMENT\\s+Description|Evidence|SupportingText)' r'\\s*=\\s*)\"(((?<=\\\\)\"|[^\"])+)\"\\s*\\n*)', content): if regex_pattern[2].startswith(\"DOCUMENT\"): new_prefix = \"SET DOCUMENT Description = \" else:", "available are 1.0, 2.0, and 2.1. Defaults to the most recent version. tree:", "else: bel_files = [bel_path] return bel_files def _write_report(reports: Union[Iterable[str], str], result: dict, report_type:", "new_content = new_content.replace(regex_pattern[0], new_evidence) if content != new_content: if new_file_path: with open(new_file_path +", "derived from the BEL file. Defaults to False. 
sqlalchemy_connection_str: str Path to SQLLite", "\"w\").write(df.to_string()) if report.endswith('.html'): df.to_html(report) if report.endswith('.md'): cols = df.columns df2 = pd.DataFrame([['---', ]", "belfile: content = belfile.read() new_content = content for regex_pattern in re.findall(r'\\n((SET\\s+(DOCUMENT\\s+Description|Evidence|SupportingText)' r'\\s*=\\s*)\"(((?<=\\\\)\"|[^\"])+)\"\\s*\\n*)', content):", "[] for file in os.listdir(bel_path): if file.endswith(\".bel\"): bel_file_path = os.path.join(bel_path, file) bel_files.append(bel_file_path) else:", "report_type options should be constants errors_or_warns_as_list_of_dicts = [x.to_dict() for x in result[report_type]] columns", "Description = \" else: new_prefix = \"SET Support = \" new_evidence_text = re.sub(r\"(\\\\?[\\r\\n]+)|\\\\", "no errors create a JSON file for the import of BEL network into", "output_file: output_file.write(new_content) def _write_odb_json(bel_path: str, results: dict, bel_version: str) -> str: json_path =", "If None given, it uses the generated e(BE:L) database (default). json_file: bool If", "the generated e(BE:L) database (default). 
json_file: bool If True, generates a JSON file", "check_bel_script, bel_to_json logger = logging.getLogger(__name__) def validate_bel_file(bel_script_path: str, force_new_db: bool = False, line_by_line:", "include: CSV, TSV, TXT, XLS, XLSX, JSON, HTML, MD bel_version: {'1', '2', '2_1'}", "= False, reports: Union[Iterable[str], str] = None, bel_version: str = '2_1', tree: bool", "= re.sub(r\"(\\\\?[\\r\\n]+)|\\\\ \", \" \", regex_pattern[3].strip()) new_evidence_text = re.sub(r\"\\s{2,}\", \" \", new_evidence_text) new_evidence_text", "# regular expression for missing continuous line (\\ at the end of line)", "not pd.isna(x) else '') for x in df.url] url_template = \"[%s](\" + report.split(\".bel.\")[0]", "that path.\"\"\" if os.path.isdir(bel_path): bel_files = [] for file in os.listdir(bel_path): if file.endswith(\".bel\"):", "---------- bel_script_path : str Path to the BEL file. new_file_path : str (optional)", "create a JSON file for the import of BEL network into Cytoscape: >", "depending on the file name suffix in reports. Parameters ---------- reports : Iterable[str]", "+= 1 if isinstance(reports, str): reports = reports.split(\",\") for report in reports: if", "document. Parameters ---------- bel_script_path : str Path to the BEL file. new_file_path :", "] * len(cols)], columns=cols) if df.hint.dtype == np.str: df.hint = df.hint.str.replace(r'\\|', '&#124;') if", "content for regex_pattern in re.findall(r'\\n((SET\\s+(DOCUMENT\\s+Description|Evidence|SupportingText)' r'\\s*=\\s*)\"(((?<=\\\\)\"|[^\"])+)\"\\s*\\n*)', content): if regex_pattern[2].startswith(\"DOCUMENT\"): new_prefix = \"SET DOCUMENT", "to False. Returns ------- dict Dictionary of file paths and results for each", "bool = False, line_by_line: bool = False, reports: Union[Iterable[str], str] = None, bel_version:", "script for correct syntax following eBNF grammar. Parameters ---------- bel_script_path: str Path to", "Multiple formats of the report can be generated at once. 
Acceptable formats include:", "reports) else: logger.info(\"\\n\".join([x.to_string() for x in result]) + \"\\n\") else: if sqlalchemy_connection_str: ebel.database.set_connection(sqlalchemy_connection_str)", "def _create_list_bel_files(bel_path: str) -> list: \"\"\"Export all BEL files in directory as list.", "\"keyword\", \"entry\", \"line_number\", \"column\", \"hint\"] df = pd.DataFrame(data=errors_or_warns_as_list_of_dicts, columns=columns) df.index += 1 if", "evidence: # regular expression for missing continuous line (\\ at the end of", "= [] for file in os.listdir(bel_path): if file.endswith(\".bel\"): bel_file_path = os.path.join(bel_path, file) bel_files.append(bel_file_path)", "create error reports in Markdown and JSON format. In case of no errors", "open(new_file_path + \".diff2repaired\", \"w\") as new_file: new_file.write('\\n'.join(list(difflib.ndiff(content.split(\"\\n\"), new_content.split(\"\\n\"))))) else: with open(bel_script_path, \"w\") as", "BEL network into Cytoscape: > ebel validate my.bel -v 2 -r error_report.md,error_report.json \"\"\"", "logger.error(\"Tree can not be printed because errors still exists\\n\") else: logger.debug(result['tree']) validation_results[bel_file]['tree'] =", "= content for regex_pattern in re.findall(r'\\n((SET\\s+(DOCUMENT\\s+Description|Evidence|SupportingText)' r'\\s*=\\s*)\"(((?<=\\\\)\"|[^\"])+)\"\\s*\\n*)', content): if regex_pattern[2].startswith(\"DOCUMENT\"): new_prefix = \"SET", "Defaults to the most recent version. 
tree: bool Generates a tree of relationships", "str, results: dict, bel_version: str) -> str: json_path = bel_path + \".json\" if", "JSON, HTML, MD bel_version: {'1', '2', '2_1'} Which BEL grammar version should be", "TODO: This is perhaps not working result = check_bel_script_line_by_line(bel_script_path, error_report_file_path=reports, bel_version=bel_version) if reports:", "numpy as np import pandas as pd import ebel.database from ebel.parser import check_bel_script_line_by_line,", "reports and reports.startswith('\"') and reports.endswith('\"'): reports = reports[1:-1] if line_by_line: # TODO: This", "result = check_bel_script( bel_script_path=bel_file, force_new_db=force_new_db, bel_version=bel_version, ) if json_file: if not result['errors'] or", "new_evidence_text = fill(new_evidence_text, break_long_words=False).replace(\"\\n\", \" \\\\\\n\") new_evidence = new_prefix + '\"' + new_evidence_text", "results: dict, bel_version: str) -> str: json_path = bel_path + \".json\" if int(bel_version[0])", "list: \"\"\"Export all BEL files in directory as list. If single file is", "grammar or syntax errors. Defaults to True. force_json: bool If True, will create", "= pd.DataFrame(data=errors_or_warns_as_list_of_dicts, columns=columns) df.index += 1 if isinstance(reports, str): reports = reports.split(\",\") for", "False, reports: Union[Iterable[str], str] = None, bel_version: str = '2_1', tree: bool =", "file paths for the reports written. \"\"\" # TODO: report_type options should be", "Parameters ---------- bel_script_path : str Path to the BEL file. 
new_file_path : str", "report.endswith('.xls'): df.to_excel(report) if report.endswith('.xlsx'): df.to_excel(report, engine='xlsxwriter') if report.endswith('.tsv'): df.to_csv(report, sep='\\t') if report.endswith('.json'): df.to_json(report)", "'2_1', tree: bool = False, sqlalchemy_connection_str: str = None, json_file: bool = True,", "= None, bel_version: str = '2_1', tree: bool = False, sqlalchemy_connection_str: str =", "if new_file_path: with open(new_file_path + \".diff2repaired\", \"w\") as new_file: new_file.write('\\n'.join(list(difflib.ndiff(content.split(\"\\n\"), new_content.split(\"\\n\"))))) else: with", "of namespaces/values and generate a new one. Defaults to False. line_by_line: bool TODO:", "BEL file. Current available are 1.0, 2.0, and 2.1. Defaults to the most", "= check_bel_script_line_by_line(bel_script_path, error_report_file_path=reports, bel_version=bel_version) if reports: logger.info(\"Wrote report to %s\\n\" % reports) else:", "passed, returns a list with that path.\"\"\" if os.path.isdir(bel_path): bel_files = [] for", "database. Only creates the JSON file when there are no grammar or syntax", "bel_version=bel_version) if reports: logger.info(\"Wrote report to %s\\n\" % reports) else: logger.info(\"\\n\".join([x.to_string() for x", "as output_file: output_file.write(new_content) def _write_odb_json(bel_path: str, results: dict, bel_version: str) -> str: json_path", "Create dict to be filled for individual BEL files. validation_results[bel_file] = dict() logger.info(f\"Processing", "to the BEL file. new_file_path : str (optional) Export repaired version of file", ": str `report_type` could be 'warnings' or 'errors'. Returns ------- list List of", "or syntax errors. Defaults to True. force_json: bool If True, will create an", "Markdown and JSON format. 
In case of no errors create a JSON file", "check_bel_script_line_by_line, check_bel_script, bel_to_json logger = logging.getLogger(__name__) def validate_bel_file(bel_script_path: str, force_new_db: bool = False,", "are no grammar or syntax errors. Defaults to True. force_json: bool If True,", "if int(bel_version[0]) > 1: json_tree = bel_to_json(results['tree']) open(json_path, \"w\").write(json_tree) return json_path def _create_list_bel_files(bel_path:", "'') for x in df.url] url_template = \"[%s](\" + report.split(\".bel.\")[0] + \".bel?expanded=true&viewer=simple#L%s)\" df.line_number", "missing continuous line (\\ at the end of line) with open(bel_script_path, \"r\", encoding=\"utf-8\")", "\" else: new_prefix = \"SET Support = \" new_evidence_text = re.sub(r\"(\\\\?[\\r\\n]+)|\\\\ \", \"", "of no errors create a JSON file for the import of BEL network", "import csv import difflib import logging from typing import Iterable, Union, Optional from", "types depending on the file name suffix in reports. Parameters ---------- reports :", "= df.columns df2 = pd.DataFrame([['---', ] * len(cols)], columns=cols) if df.hint.dtype == np.str:", "report_type='warnings') validation_results[bel_file]['reports'] = report_paths elif result['errors']: if not reports: logger.info('\\n'.join([x.to_string() for x in", "open(bel_script_path, \"w\") as output_file: output_file.write(new_content) def _write_odb_json(bel_path: str, results: dict, bel_version: str) ->", "= \"[%s](\" + report.split(\".bel.\")[0] + \".bel?expanded=true&viewer=simple#L%s)\" df.line_number = [url_template % (x, x) for", "and values. If None given, it uses the generated e(BE:L) database (default). json_file:", "of file paths and results for each BEL file processed. Examples -------- Task:", "else: with open(bel_script_path, \"w\") as output_file: output_file.write(new_content) def _write_odb_json(bel_path: str, results: dict, bel_version:", "file processed. 
Examples -------- Task: Validate BEL script `my.bel` for BEL syntax 2.0,", "bel_files = [] for file in os.listdir(bel_path): if file.endswith(\".bel\"): bel_file_path = os.path.join(bel_path, file)", "if report.endswith('.xlsx'): df.to_excel(report, engine='xlsxwriter') if report.endswith('.tsv'): df.to_csv(report, sep='\\t') if report.endswith('.json'): df.to_json(report) if report.endswith('.txt'):", "\"\"\"Write report in different types depending on the file name suffix in reports.", "open(report, \"w\").write(df.to_string()) if report.endswith('.html'): df.to_html(report) if report.endswith('.md'): cols = df.columns df2 = pd.DataFrame([['---',", "Export repaired version of file to new path. \"\"\" # if evidence: #", "eBNF grammar. Parameters ---------- bel_script_path: str Path to BEL file or directory contaiing", "version of file to new path. \"\"\" # if evidence: # regular expression", "report.endswith('.csv'): df.to_csv(report) if report.endswith('.xls'): df.to_excel(report) if report.endswith('.xlsx'): df.to_excel(report, engine='xlsxwriter') if report.endswith('.tsv'): df.to_csv(report, sep='\\t')", "the JSON file when there are no grammar or syntax errors. Defaults to", "should be constants errors_or_warns_as_list_of_dicts = [x.to_dict() for x in result[report_type]] columns = [report_type[:-1]", "BEL file.\"\"\" import os import re import csv import difflib import logging from", "if report.endswith('.xls'): df.to_excel(report) if report.endswith('.xlsx'): df.to_excel(report, engine='xlsxwriter') if report.endswith('.tsv'): df.to_csv(report, sep='\\t') if report.endswith('.json'):", "`my.bel` for BEL syntax 2.0, create error reports in Markdown and JSON format.", "'2', '2_1'} Which BEL grammar version should be used for validating the BEL", "True. force_json: bool If True, will create an importable JSON file even if", "if there are namespace/value errors. Defaults to False. 
Returns ------- dict Dictionary of", "dict Dictionary of file paths and results for each BEL file processed. Examples", "df.to_json(report) if report.endswith('.txt'): open(report, \"w\").write(df.to_string()) if report.endswith('.html'): df.to_html(report) if report.endswith('.md'): cols = df.columns", "BEL syntax 2.0, create error reports in Markdown and JSON format. In case", "in Markdown and JSON format. In case of no errors create a JSON", "report.endswith('.txt'): open(report, \"w\").write(df.to_string()) if report.endswith('.html'): df.to_html(report) if report.endswith('.md'): cols = df.columns df2 =", "\"_class\", \"url\", \"keyword\", \"entry\", \"line_number\", \"column\", \"hint\"] df = pd.DataFrame(data=errors_or_warns_as_list_of_dicts, columns=columns) df.index +=", "should be used for validating the BEL file. Current available are 1.0, 2.0,", "if bel_script_path.startswith('\"') and bel_script_path.endswith('\"'): bel_script_path = bel_script_path[1:-1] if reports and reports.startswith('\"') and reports.endswith('\"'):", "r'\\s*=\\s*)\"(((?<=\\\\)\"|[^\"])+)\"\\s*\\n*)', content): if regex_pattern[2].startswith(\"DOCUMENT\"): new_prefix = \"SET DOCUMENT Description = \" else: new_prefix", "bel_to_json logger = logging.getLogger(__name__) def validate_bel_file(bel_script_path: str, force_new_db: bool = False, line_by_line: bool", "Only creates the JSON file when there are no grammar or syntax errors.", "for regex_pattern in re.findall(r'\\n((SET\\s+(DOCUMENT\\s+Description|Evidence|SupportingText)' r'\\s*=\\s*)\"(((?<=\\\\)\"|[^\"])+)\"\\s*\\n*)', content): if regex_pattern[2].startswith(\"DOCUMENT\"): new_prefix = \"SET DOCUMENT Description", "df2 = pd.DataFrame([['---', ] * len(cols)], columns=cols) if df.hint.dtype == np.str: df.hint =", "= \"SET DOCUMENT Description = \" else: new_prefix = \"SET Support = \"", "printed because errors still exists\\n\") else: logger.debug(result['tree']) validation_results[bel_file]['tree'] = result['tree'] if 
result['warnings'] and", "dict, bel_version: str) -> str: json_path = bel_path + \".json\" if int(bel_version[0]) >", "report can be generated at once. Acceptable formats include: CSV, TSV, TXT, XLS,", "paths and results for each BEL file processed. Examples -------- Task: Validate BEL", "= None): \"\"\"Repair a BEL document. Parameters ---------- bel_script_path : str Path to", "def _write_odb_json(bel_path: str, results: dict, bel_version: str) -> str: json_path = bel_path +", "= reports[1:-1] if line_by_line: # TODO: This is perhaps not working result =", "new_content.split(\"\\n\"))))) else: with open(bel_script_path, \"w\") as output_file: output_file.write(new_content) def _write_odb_json(bel_path: str, results: dict,", "str List of file paths to write reports to. Multiple formats of the", "List of file paths for the reports written. \"\"\" # TODO: report_type options", "will create an importable JSON file even if there are namespace/value errors. Defaults", "result = check_bel_script_line_by_line(bel_script_path, error_report_file_path=reports, bel_version=bel_version) if reports: logger.info(\"Wrote report to %s\\n\" % reports)", "(optional) Export repaired version of file to new path. \"\"\" # if evidence:", "len(cols)], columns=cols) if df.hint.dtype == np.str: df.hint = df.hint.str.replace(r'\\|', '&#124;') if df.entry.dtype ==", "= logging.getLogger(__name__) def validate_bel_file(bel_script_path: str, force_new_db: bool = False, line_by_line: bool = False,", "= json_file if tree: if result['errors']: logger.error(\"Tree can not be printed because errors", "if regex_pattern[2].startswith(\"DOCUMENT\"): new_prefix = \"SET DOCUMENT Description = \" else: new_prefix = \"SET", "comma separated list of report file names. 
result : dict return value of", "df.hint.str.replace(r'\\|', '&#124;') if df.entry.dtype == np.str: df.entry = df.entry.str.replace(r'\\|', '&#124;') df.url = [(\"[url](\"", "> ebel validate my.bel -v 2 -r error_report.md,error_report.json \"\"\" validation_results = dict() if", "not result['errors'] or force_json: json_file = _write_odb_json(bel_path=bel_file, results=result, bel_version=bel_version) validation_results[bel_file]['json'] = json_file if", "bel_version: str) -> str: json_path = bel_path + \".json\" if int(bel_version[0]) > 1:", "Parameters ---------- bel_script_path: str Path to BEL file or directory contaiing BEL files.", "case of no errors create a JSON file for the import of BEL", "dict to be filled for individual BEL files. validation_results[bel_file] = dict() logger.info(f\"Processing {bel_file}\")", "-> list: \"\"\"Export all BEL files in directory as list. If single file", "df.hint = df.hint.str.replace(r'\\|', '&#124;') if df.entry.dtype == np.str: df.entry = df.entry.str.replace(r'\\|', '&#124;') df.url", "url_template = \"[%s](\" + report.split(\".bel.\")[0] + \".bel?expanded=true&viewer=simple#L%s)\" df.line_number = [url_template % (x, x)", "Returns ------- list List of file paths for the reports written. 
\"\"\" #", "import logging from typing import Iterable, Union, Optional from textwrap import fill import", "df = pd.DataFrame(data=errors_or_warns_as_list_of_dicts, columns=columns) df.index += 1 if isinstance(reports, str): reports = reports.split(\",\")", "= dict() if bel_script_path.startswith('\"') and bel_script_path.endswith('\"'): bel_script_path = bel_script_path[1:-1] if reports and reports.startswith('\"')", "-r error_report.md,error_report.json \"\"\" validation_results = dict() if bel_script_path.startswith('\"') and bel_script_path.endswith('\"'): bel_script_path = bel_script_path[1:-1]", "= [report_type[:-1] + \"_class\", \"url\", \"keyword\", \"entry\", \"line_number\", \"column\", \"hint\"] df = pd.DataFrame(data=errors_or_warns_as_list_of_dicts,", "file. new_file_path : str (optional) Export repaired version of file to new path.", "the BEL file. Current available are 1.0, 2.0, and 2.1. Defaults to the", "from textwrap import fill import numpy as np import pandas as pd import", "`report_type` could be 'warnings' or 'errors'. Returns ------- list List of file paths", "if not pd.isna(x) else '') for x in df.url] url_template = \"[%s](\" +", "re.sub(r\"\\s{2,}\", \" \", new_evidence_text) new_evidence_text = re.sub(r'(\\\\)(\\w)', r'\\g<2>', new_evidence_text) new_evidence_text = fill(new_evidence_text, break_long_words=False).replace(\"\\n\",", "be used for importing BEL relationships into an e(BE:L) generated OrientDB database. 
Only", "check_bel_script( bel_script_path=bel_file, force_new_db=force_new_db, bel_version=bel_version, ) if json_file: if not result['errors'] or force_json: json_file", "\" \", regex_pattern[3].strip()) new_evidence_text = re.sub(r\"\\s{2,}\", \" \", new_evidence_text) new_evidence_text = re.sub(r'(\\\\)(\\w)', r'\\g<2>',", "\", new_evidence_text) new_evidence_text = re.sub(r'(\\\\)(\\w)', r'\\g<2>', new_evidence_text) new_evidence_text = fill(new_evidence_text, break_long_words=False).replace(\"\\n\", \" \\\\\\n\")", "database of namespaces/values and generate a new one. Defaults to False. line_by_line: bool", "with open(new_file_path + \".diff2repaired\", \"w\") as new_file: new_file.write('\\n'.join(list(difflib.ndiff(content.split(\"\\n\"), new_content.split(\"\\n\"))))) else: with open(bel_script_path, \"w\")", "= new_prefix + '\"' + new_evidence_text + '\"\\n\\n' new_content = new_content.replace(regex_pattern[0], new_evidence) if", "List of file paths to write reports to. Multiple formats of the report", "a JSON file that can be used for importing BEL relationships into an", "\".diff2repaired\", \"w\") as new_file: new_file.write('\\n'.join(list(difflib.ndiff(content.split(\"\\n\"), new_content.split(\"\\n\"))))) else: with open(bel_script_path, \"w\") as output_file: output_file.write(new_content)", "False, line_by_line: bool = False, reports: Union[Iterable[str], str] = None, bel_version: str =", "json_file: bool = True, force_json: bool = False,): \"\"\"Validate BEL script for correct", "report_type : str `report_type` could be 'warnings' or 'errors'. 
Returns ------- list List", "+ str(x) + \")\" if not pd.isna(x) else '') for x in df.url]", "ebel.database from ebel.parser import check_bel_script_line_by_line, check_bel_script, bel_to_json logger = logging.getLogger(__name__) def validate_bel_file(bel_script_path: str,", "methods used for validating a BEL file.\"\"\" import os import re import csv", "file.endswith(\".bel\"): bel_file_path = os.path.join(bel_path, file) bel_files.append(bel_file_path) else: bel_files = [bel_path] return bel_files def", "TXT, XLS, XLSX, JSON, HTML, MD bel_version: {'1', '2', '2_1'} Which BEL grammar", "# TODO: This is perhaps not working result = check_bel_script_line_by_line(bel_script_path, error_report_file_path=reports, bel_version=bel_version) if", "formats of the report can be generated at once. Acceptable formats include: CSV,", "logging from typing import Iterable, Union, Optional from textwrap import fill import numpy", "there are namespace/value errors. Defaults to False. Returns ------- dict Dictionary of file", "path. \"\"\" # if evidence: # regular expression for missing continuous line (\\", "JSON format. In case of no errors create a JSON file for the", "reports = reports[1:-1] if line_by_line: # TODO: This is perhaps not working result", "if sqlalchemy_connection_str: ebel.database.set_connection(sqlalchemy_connection_str) bel_files = _create_list_bel_files(bel_path=bel_script_path) validation_results['bel_files_checked'] = bel_files for bel_file in bel_files:", "str (optional) Export repaired version of file to new path. \"\"\" # if", "this. reports: Iterable[str] or str List of file paths to write reports to.", "line_by_line: bool = False, reports: Union[Iterable[str], str] = None, bel_version: str = '2_1',", "Defaults to False. line_by_line: bool TODO: Write this. 
reports: Iterable[str] or str List", "= '2_1', tree: bool = False, sqlalchemy_connection_str: str = None, json_file: bool =", "+ \"\\n\") else: if sqlalchemy_connection_str: ebel.database.set_connection(sqlalchemy_connection_str) bel_files = _create_list_bel_files(bel_path=bel_script_path) validation_results['bel_files_checked'] = bel_files for", "bel_to_json(results['tree']) open(json_path, \"w\").write(json_tree) return json_path def _create_list_bel_files(bel_path: str) -> list: \"\"\"Export all BEL", "reports.split(\",\") for report in reports: if report.endswith('.csv'): df.to_csv(report) if report.endswith('.xls'): df.to_excel(report) if report.endswith('.xlsx'):", "list: \"\"\"Write report in different types depending on the file name suffix in", "bel_version=bel_version) validation_results[bel_file]['json'] = json_file if tree: if result['errors']: logger.error(\"Tree can not be printed", "validation_results = dict() if bel_script_path.startswith('\"') and bel_script_path.endswith('\"'): bel_script_path = bel_script_path[1:-1] if reports and", "str) -> list: \"\"\"Export all BEL files in directory as list. If single", "+ \".json\" if int(bel_version[0]) > 1: json_tree = bel_to_json(results['tree']) open(json_path, \"w\").write(json_tree) return json_path", "True, generates a JSON file that can be used for importing BEL relationships", "reports to. Multiple formats of the report can be generated at once. Acceptable", "bel_version=bel_version, ) if json_file: if not result['errors'] or force_json: json_file = _write_odb_json(bel_path=bel_file, results=result,", "reports: logger.info('\\n'.join([x.to_string() for x in result['errors']]) + \"\\n\") else: _write_report(reports, result, report_type='errors') def", "Validate BEL script `my.bel` for BEL syntax 2.0, create error reports in Markdown", "file names. result : dict return value of check_bel_script methode. report_type : str", "file. Defaults to False. 
sqlalchemy_connection_str: str Path to SQLLite database to be used", "def repair_bel_file(bel_script_path: str, new_file_path: Optional[str] = None): \"\"\"Repair a BEL document. Parameters ----------", "Write this. reports: Iterable[str] or str List of file paths to write reports", "with that path.\"\"\" if os.path.isdir(bel_path): bel_files = [] for file in os.listdir(bel_path): if", "If True, will create an importable JSON file even if there are namespace/value", "True, will create an importable JSON file even if there are namespace/value errors.", "the report can be generated at once. Acceptable formats include: CSV, TSV, TXT,", "'errors'. Returns ------- list List of file paths for the reports written. \"\"\"", "str(x) + \")\" if not pd.isna(x) else '') for x in df.url] url_template", "in result[report_type]] columns = [report_type[:-1] + \"_class\", \"url\", \"keyword\", \"entry\", \"line_number\", \"column\", \"hint\"]", "csv import difflib import logging from typing import Iterable, Union, Optional from textwrap", "be generated at once. Acceptable formats include: CSV, TSV, TXT, XLS, XLSX, JSON,", "force_new_db: bool Delete current database of namespaces/values and generate a new one. Defaults", "Path to the BEL file. new_file_path : str (optional) Export repaired version of", "for file in os.listdir(bel_path): if file.endswith(\".bel\"): bel_file_path = os.path.join(bel_path, file) bel_files.append(bel_file_path) else: bel_files", "return bel_files def _write_report(reports: Union[Iterable[str], str], result: dict, report_type: str) -> list: \"\"\"Write", "+ \"_class\", \"url\", \"keyword\", \"entry\", \"line_number\", \"column\", \"hint\"] df = pd.DataFrame(data=errors_or_warns_as_list_of_dicts, columns=columns) df.index", "If True, generates a JSON file that can be used for importing BEL", "of report formats or comma separated list of report file names. result :", "for validating the BEL file. Current available are 1.0, 2.0, and 2.1. 
Defaults", "import ebel.database from ebel.parser import check_bel_script_line_by_line, check_bel_script, bel_to_json logger = logging.getLogger(__name__) def validate_bel_file(bel_script_path:", "[report_type[:-1] + \"_class\", \"url\", \"keyword\", \"entry\", \"line_number\", \"column\", \"hint\"] df = pd.DataFrame(data=errors_or_warns_as_list_of_dicts, columns=columns)", "= df.entry.str.replace(r'\\|', '&#124;') df.url = [(\"[url](\" + str(x) + \")\" if not pd.isna(x)", "\"r\", encoding=\"utf-8\") as belfile: content = belfile.read() new_content = content for regex_pattern in", "else '') for x in df.url] url_template = \"[%s](\" + report.split(\".bel.\")[0] + \".bel?expanded=true&viewer=simple#L%s)\"", "an importable JSON file even if there are namespace/value errors. Defaults to False.", "Which BEL grammar version should be used for validating the BEL file. Current", "Returns ------- dict Dictionary of file paths and results for each BEL file", "<filename>ebel/validate.py \"\"\"Collect of methods used for validating a BEL file.\"\"\" import os import", "the import of BEL network into Cytoscape: > ebel validate my.bel -v 2", "bel_version: {'1', '2', '2_1'} Which BEL grammar version should be used for validating", "check_bel_script_line_by_line(bel_script_path, error_report_file_path=reports, bel_version=bel_version) if reports: logger.info(\"Wrote report to %s\\n\" % reports) else: logger.info(\"\\n\".join([x.to_string()", "= \" else: new_prefix = \"SET Support = \" new_evidence_text = re.sub(r\"(\\\\?[\\r\\n]+)|\\\\ \",", "or comma separated list of report file names. 
result : dict return value", "import pandas as pd import ebel.database from ebel.parser import check_bel_script_line_by_line, check_bel_script, bel_to_json logger", "\"\"\" # TODO: report_type options should be constants errors_or_warns_as_list_of_dicts = [x.to_dict() for x", "reports: Union[Iterable[str], str] = None, bel_version: str = '2_1', tree: bool = False,", "to be filled for individual BEL files. validation_results[bel_file] = dict() logger.info(f\"Processing {bel_file}\") result", "if report.endswith('.tsv'): df.to_csv(report, sep='\\t') if report.endswith('.json'): df.to_json(report) if report.endswith('.txt'): open(report, \"w\").write(df.to_string()) if report.endswith('.html'):", "report.endswith('.html'): df.to_html(report) if report.endswith('.md'): cols = df.columns df2 = pd.DataFrame([['---', ] * len(cols)],", "textwrap import fill import numpy as np import pandas as pd import ebel.database", "report in reports: if report.endswith('.csv'): df.to_csv(report) if report.endswith('.xls'): df.to_excel(report) if report.endswith('.xlsx'): df.to_excel(report, engine='xlsxwriter')", "= [url_template % (x, x) for x in df.line_number] df3 = pd.concat([df2, df])", "= False,): \"\"\"Validate BEL script for correct syntax following eBNF grammar. Parameters ----------", "BEL file or directory contaiing BEL files. force_new_db: bool Delete current database of", "output_file.write(new_content) def _write_odb_json(bel_path: str, results: dict, bel_version: str) -> str: json_path = bel_path", "content = belfile.read() new_content = content for regex_pattern in re.findall(r'\\n((SET\\s+(DOCUMENT\\s+Description|Evidence|SupportingText)' r'\\s*=\\s*)\"(((?<=\\\\)\"|[^\"])+)\"\\s*\\n*)', content): if", "reports in Markdown and JSON format. 
In case of no errors create a", "= _write_report(reports, result, report_type='warnings') validation_results[bel_file]['reports'] = report_paths elif result['errors']: if not reports: logger.info('\\n'.join([x.to_string()", "given, it uses the generated e(BE:L) database (default). json_file: bool If True, generates", "result, report_type='warnings') validation_results[bel_file]['reports'] = report_paths elif result['errors']: if not reports: logger.info('\\n'.join([x.to_string() for x", "str) -> list: \"\"\"Write report in different types depending on the file name", "if reports: logger.info(\"Wrote report to %s\\n\" % reports) else: logger.info(\"\\n\".join([x.to_string() for x in", "open(json_path, \"w\").write(json_tree) return json_path def _create_list_bel_files(bel_path: str) -> list: \"\"\"Export all BEL files", "= bel_script_path[1:-1] if reports and reports.startswith('\"') and reports.endswith('\"'): reports = reports[1:-1] if line_by_line:", "to. Multiple formats of the report can be generated at once. Acceptable formats", "of line) with open(bel_script_path, \"r\", encoding=\"utf-8\") as belfile: content = belfile.read() new_content =", "reports written. \"\"\" # TODO: report_type options should be constants errors_or_warns_as_list_of_dicts = [x.to_dict()", "This is perhaps not working result = check_bel_script_line_by_line(bel_script_path, error_report_file_path=reports, bel_version=bel_version) if reports: logger.info(\"Wrote", "if report.endswith('.md'): cols = df.columns df2 = pd.DataFrame([['---', ] * len(cols)], columns=cols) if", "at once. 
Acceptable formats include: CSV, TSV, TXT, XLS, XLSX, JSON, HTML, MD", "-> str: json_path = bel_path + \".json\" if int(bel_version[0]) > 1: json_tree =", "if isinstance(reports, str): reports = reports.split(\",\") for report in reports: if report.endswith('.csv'): df.to_csv(report)", "* len(cols)], columns=cols) if df.hint.dtype == np.str: df.hint = df.hint.str.replace(r'\\|', '&#124;') if df.entry.dtype", "a BEL file.\"\"\" import os import re import csv import difflib import logging", "Iterable, Union, Optional from textwrap import fill import numpy as np import pandas", "used for validating the BEL file. Current available are 1.0, 2.0, and 2.1.", "= check_bel_script( bel_script_path=bel_file, force_new_db=force_new_db, bel_version=bel_version, ) if json_file: if not result['errors'] or force_json:", "for storing/looking up used namespaces and values. If None given, it uses the", "are namespace/value errors. Defaults to False. Returns ------- dict Dictionary of file paths", "line_by_line: # TODO: This is perhaps not working result = check_bel_script_line_by_line(bel_script_path, error_report_file_path=reports, bel_version=bel_version)", "bel_version: str = '2_1', tree: bool = False, sqlalchemy_connection_str: str = None, json_file:", "'\"\\n\\n' new_content = new_content.replace(regex_pattern[0], new_evidence) if content != new_content: if new_file_path: with open(new_file_path", "dict, report_type: str) -> list: \"\"\"Write report in different types depending on the", "BEL file processed. Examples -------- Task: Validate BEL script `my.bel` for BEL syntax", "dict() logger.info(f\"Processing {bel_file}\") result = check_bel_script( bel_script_path=bel_file, force_new_db=force_new_db, bel_version=bel_version, ) if json_file: if", "json_file if tree: if result['errors']: logger.error(\"Tree can not be printed because errors still", "Iterable[str] or str List of file paths to write reports to. 
Multiple formats", "working result = check_bel_script_line_by_line(bel_script_path, error_report_file_path=reports, bel_version=bel_version) if reports: logger.info(\"Wrote report to %s\\n\" %", "and reports.endswith('\"'): reports = reports[1:-1] if line_by_line: # TODO: This is perhaps not", "new_file.write('\\n'.join(list(difflib.ndiff(content.split(\"\\n\"), new_content.split(\"\\n\"))))) else: with open(bel_script_path, \"w\") as output_file: output_file.write(new_content) def _write_odb_json(bel_path: str, results:", "report in different types depending on the file name suffix in reports. Parameters", "force_new_db=force_new_db, bel_version=bel_version, ) if json_file: if not result['errors'] or force_json: json_file = _write_odb_json(bel_path=bel_file,", "x in df.url] url_template = \"[%s](\" + report.split(\".bel.\")[0] + \".bel?expanded=true&viewer=simple#L%s)\" df.line_number = [url_template", "{'1', '2', '2_1'} Which BEL grammar version should be used for validating the", "because errors still exists\\n\") else: logger.debug(result['tree']) validation_results[bel_file]['tree'] = result['tree'] if result['warnings'] and reports:", "fill(new_evidence_text, break_long_words=False).replace(\"\\n\", \" \\\\\\n\") new_evidence = new_prefix + '\"' + new_evidence_text + '\"\\n\\n'", "a list with that path.\"\"\" if os.path.isdir(bel_path): bel_files = [] for file in", "report formats or comma separated list of report file names. 
result : dict", "if content != new_content: if new_file_path: with open(new_file_path + \".diff2repaired\", \"w\") as new_file:", "!= new_content: if new_file_path: with open(new_file_path + \".diff2repaired\", \"w\") as new_file: new_file.write('\\n'.join(list(difflib.ndiff(content.split(\"\\n\"), new_content.split(\"\\n\")))))", "------- dict Dictionary of file paths and results for each BEL file processed.", "def validate_bel_file(bel_script_path: str, force_new_db: bool = False, line_by_line: bool = False, reports: Union[Iterable[str],", "bel_script_path : str Path to the BEL file. new_file_path : str (optional) Export", "and bel_script_path.endswith('\"'): bel_script_path = bel_script_path[1:-1] if reports and reports.startswith('\"') and reports.endswith('\"'): reports =", "to write reports to. Multiple formats of the report can be generated at", "BEL document. Parameters ---------- bel_script_path : str Path to the BEL file. new_file_path", "df.entry.dtype == np.str: df.entry = df.entry.str.replace(r'\\|', '&#124;') df.url = [(\"[url](\" + str(x) +", "if report.endswith('.json'): df.to_json(report) if report.endswith('.txt'): open(report, \"w\").write(df.to_string()) if report.endswith('.html'): df.to_html(report) if report.endswith('.md'): cols", "+ '\"\\n\\n' new_content = new_content.replace(regex_pattern[0], new_evidence) if content != new_content: if new_file_path: with", "e(BE:L) database (default). json_file: bool If True, generates a JSON file that can", "the BEL file. Defaults to False. sqlalchemy_connection_str: str Path to SQLLite database to", "there are no grammar or syntax errors. Defaults to True. force_json: bool If", ": str (optional) Export repaired version of file to new path. \"\"\" #", "bool If True, generates a JSON file that can be used for importing", "for correct syntax following eBNF grammar. 
Parameters ---------- bel_script_path: str Path to BEL", "bel_files def _write_report(reports: Union[Iterable[str], str], result: dict, report_type: str) -> list: \"\"\"Write report", "bool = True, force_json: bool = False,): \"\"\"Validate BEL script for correct syntax", "= report_paths elif result['errors']: if not reports: logger.info('\\n'.join([x.to_string() for x in result['errors']]) +", "\"SET DOCUMENT Description = \" else: new_prefix = \"SET Support = \" new_evidence_text", "validating a BEL file.\"\"\" import os import re import csv import difflib import", "as new_file: new_file.write('\\n'.join(list(difflib.ndiff(content.split(\"\\n\"), new_content.split(\"\\n\"))))) else: with open(bel_script_path, \"w\") as output_file: output_file.write(new_content) def _write_odb_json(bel_path:", "reports. Parameters ---------- reports : Iterable[str] or str List of report formats or", "the BEL file. new_file_path : str (optional) Export repaired version of file to", "logger.debug(result['tree']) validation_results[bel_file]['tree'] = result['tree'] if result['warnings'] and reports: report_paths = _write_report(reports, result, report_type='warnings')", "= re.sub(r'(\\\\)(\\w)', r'\\g<2>', new_evidence_text) new_evidence_text = fill(new_evidence_text, break_long_words=False).replace(\"\\n\", \" \\\\\\n\") new_evidence = new_prefix", "not be printed because errors still exists\\n\") else: logger.debug(result['tree']) validation_results[bel_file]['tree'] = result['tree'] if", "os import re import csv import difflib import logging from typing import Iterable,", "BEL relationships into an e(BE:L) generated OrientDB database. 
Only creates the JSON file", "\"url\", \"keyword\", \"entry\", \"line_number\", \"column\", \"hint\"] df = pd.DataFrame(data=errors_or_warns_as_list_of_dicts, columns=columns) df.index += 1", "result['errors']]) + \"\\n\") else: _write_report(reports, result, report_type='errors') def repair_bel_file(bel_script_path: str, new_file_path: Optional[str] =", "str Path to BEL file or directory contaiing BEL files. force_new_db: bool Delete", "list. If single file is passed, returns a list with that path.\"\"\" if", "version should be used for validating the BEL file. Current available are 1.0,", "BEL file. new_file_path : str (optional) Export repaired version of file to new", "for x in result]) + \"\\n\") else: if sqlalchemy_connection_str: ebel.database.set_connection(sqlalchemy_connection_str) bel_files = _create_list_bel_files(bel_path=bel_script_path)", "None): \"\"\"Repair a BEL document. Parameters ---------- bel_script_path : str Path to the", "be 'warnings' or 'errors'. Returns ------- list List of file paths for the", "be constants errors_or_warns_as_list_of_dicts = [x.to_dict() for x in result[report_type]] columns = [report_type[:-1] +", "used for importing BEL relationships into an e(BE:L) generated OrientDB database. Only creates", "returns a list with that path.\"\"\" if os.path.isdir(bel_path): bel_files = [] for file", "as np import pandas as pd import ebel.database from ebel.parser import check_bel_script_line_by_line, check_bel_script,", "% reports) else: logger.info(\"\\n\".join([x.to_string() for x in result]) + \"\\n\") else: if sqlalchemy_connection_str:", "result, report_type='errors') def repair_bel_file(bel_script_path: str, new_file_path: Optional[str] = None): \"\"\"Repair a BEL document.", "_create_list_bel_files(bel_path=bel_script_path) validation_results['bel_files_checked'] = bel_files for bel_file in bel_files: # Create dict to be", "file paths to write reports to. 
Multiple formats of the report can be", "+ '\"' + new_evidence_text + '\"\\n\\n' new_content = new_content.replace(regex_pattern[0], new_evidence) if content !=", "in reports: if report.endswith('.csv'): df.to_csv(report) if report.endswith('.xls'): df.to_excel(report) if report.endswith('.xlsx'): df.to_excel(report, engine='xlsxwriter') if", "sqlalchemy_connection_str: ebel.database.set_connection(sqlalchemy_connection_str) bel_files = _create_list_bel_files(bel_path=bel_script_path) validation_results['bel_files_checked'] = bel_files for bel_file in bel_files: #", "validating the BEL file. Current available are 1.0, 2.0, and 2.1. Defaults to", "str Path to the BEL file. new_file_path : str (optional) Export repaired version", "different types depending on the file name suffix in reports. Parameters ---------- reports", "in result['errors']]) + \"\\n\") else: _write_report(reports, result, report_type='errors') def repair_bel_file(bel_script_path: str, new_file_path: Optional[str]", "bel_script_path.endswith('\"'): bel_script_path = bel_script_path[1:-1] if reports and reports.startswith('\"') and reports.endswith('\"'): reports = reports[1:-1]", "of check_bel_script methode. report_type : str `report_type` could be 'warnings' or 'errors'. 
Returns", "reports: report_paths = _write_report(reports, result, report_type='warnings') validation_results[bel_file]['reports'] = report_paths elif result['errors']: if not", "df.url = [(\"[url](\" + str(x) + \")\" if not pd.isna(x) else '') for", "-------- Task: Validate BEL script `my.bel` for BEL syntax 2.0, create error reports", "difflib import logging from typing import Iterable, Union, Optional from textwrap import fill", "\"\"\"Collect of methods used for validating a BEL file.\"\"\" import os import re", "= _create_list_bel_files(bel_path=bel_script_path) validation_results['bel_files_checked'] = bel_files for bel_file in bel_files: # Create dict to", "-v 2 -r error_report.md,error_report.json \"\"\" validation_results = dict() if bel_script_path.startswith('\"') and bel_script_path.endswith('\"'): bel_script_path", "BEL grammar version should be used for validating the BEL file. Current available", "in bel_files: # Create dict to be filled for individual BEL files. validation_results[bel_file]", "report_type='errors') def repair_bel_file(bel_script_path: str, new_file_path: Optional[str] = None): \"\"\"Repair a BEL document. Parameters", "df.columns df2 = pd.DataFrame([['---', ] * len(cols)], columns=cols) if df.hint.dtype == np.str: df.hint", "used for validating a BEL file.\"\"\" import os import re import csv import", "force_new_db: bool = False, line_by_line: bool = False, reports: Union[Iterable[str], str] = None,", "sqlalchemy_connection_str: str = None, json_file: bool = True, force_json: bool = False,): \"\"\"Validate", "BEL files. validation_results[bel_file] = dict() logger.info(f\"Processing {bel_file}\") result = check_bel_script( bel_script_path=bel_file, force_new_db=force_new_db, bel_version=bel_version,", "TODO: Write this. 
reports: Iterable[str] or str List of file paths to write", "bool = False, reports: Union[Iterable[str], str] = None, bel_version: str = '2_1', tree:", "import of BEL network into Cytoscape: > ebel validate my.bel -v 2 -r", "[url_template % (x, x) for x in df.line_number] df3 = pd.concat([df2, df]) df3.to_csv(report,", "constants errors_or_warns_as_list_of_dicts = [x.to_dict() for x in result[report_type]] columns = [report_type[:-1] + \"_class\",", "importing BEL relationships into an e(BE:L) generated OrientDB database. Only creates the JSON", "bel_files = [bel_path] return bel_files def _write_report(reports: Union[Iterable[str], str], result: dict, report_type: str)", "result['warnings'] and reports: report_paths = _write_report(reports, result, report_type='warnings') validation_results[bel_file]['reports'] = report_paths elif result['errors']:", "errors. Defaults to True. force_json: bool If True, will create an importable JSON", "validation_results['bel_files_checked'] = bel_files for bel_file in bel_files: # Create dict to be filled", "2.0, create error reports in Markdown and JSON format. 
In case of no", "of BEL network into Cytoscape: > ebel validate my.bel -v 2 -r error_report.md,error_report.json", "file is passed, returns a list with that path.\"\"\" if os.path.isdir(bel_path): bel_files =", "JSON file for the import of BEL network into Cytoscape: > ebel validate", "reports: logger.info(\"Wrote report to %s\\n\" % reports) else: logger.info(\"\\n\".join([x.to_string() for x in result])", "% (x, x) for x in df.line_number] df3 = pd.concat([df2, df]) df3.to_csv(report, sep=\"|\",", "of methods used for validating a BEL file.\"\"\" import os import re import", "df.entry = df.entry.str.replace(r'\\|', '&#124;') df.url = [(\"[url](\" + str(x) + \")\" if not", "str = '2_1', tree: bool = False, sqlalchemy_connection_str: str = None, json_file: bool", "with open(bel_script_path, \"w\") as output_file: output_file.write(new_content) def _write_odb_json(bel_path: str, results: dict, bel_version: str)", "dict() if bel_script_path.startswith('\"') and bel_script_path.endswith('\"'): bel_script_path = bel_script_path[1:-1] if reports and reports.startswith('\"') and", "_write_odb_json(bel_path: str, results: dict, bel_version: str) -> str: json_path = bel_path + \".json\"", "is perhaps not working result = check_bel_script_line_by_line(bel_script_path, error_report_file_path=reports, bel_version=bel_version) if reports: logger.info(\"Wrote report", "logger.info('\\n'.join([x.to_string() for x in result['errors']]) + \"\\n\") else: _write_report(reports, result, report_type='errors') def repair_bel_file(bel_script_path:", "\" \", new_evidence_text) new_evidence_text = re.sub(r'(\\\\)(\\w)', r'\\g<2>', new_evidence_text) new_evidence_text = fill(new_evidence_text, break_long_words=False).replace(\"\\n\", \"", "= bel_path + \".json\" if int(bel_version[0]) > 1: json_tree = bel_to_json(results['tree']) open(json_path, \"w\").write(json_tree)", "list of report file names. 
result : dict return value of check_bel_script methode.", "for x in result[report_type]] columns = [report_type[:-1] + \"_class\", \"url\", \"keyword\", \"entry\", \"line_number\",", "str): reports = reports.split(\",\") for report in reports: if report.endswith('.csv'): df.to_csv(report) if report.endswith('.xls'):", "reports.endswith('\"'): reports = reports[1:-1] if line_by_line: # TODO: This is perhaps not working", "x in result['errors']]) + \"\\n\") else: _write_report(reports, result, report_type='errors') def repair_bel_file(bel_script_path: str, new_file_path:", "end of line) with open(bel_script_path, \"r\", encoding=\"utf-8\") as belfile: content = belfile.read() new_content", "force_json: bool = False,): \"\"\"Validate BEL script for correct syntax following eBNF grammar.", "formats include: CSV, TSV, TXT, XLS, XLSX, JSON, HTML, MD bel_version: {'1', '2',", "bel_script_path[1:-1] if reports and reports.startswith('\"') and reports.endswith('\"'): reports = reports[1:-1] if line_by_line: #", "errors create a JSON file for the import of BEL network into Cytoscape:", "and results for each BEL file processed. Examples -------- Task: Validate BEL script", "False. line_by_line: bool TODO: Write this. reports: Iterable[str] or str List of file", "= pd.DataFrame([['---', ] * len(cols)], columns=cols) if df.hint.dtype == np.str: df.hint = df.hint.str.replace(r'\\|',", "for missing continuous line (\\ at the end of line) with open(bel_script_path, \"r\",", "syntax errors. Defaults to True. 
force_json: bool If True, will create an importable", "belfile.read() new_content = content for regex_pattern in re.findall(r'\\n((SET\\s+(DOCUMENT\\s+Description|Evidence|SupportingText)' r'\\s*=\\s*)\"(((?<=\\\\)\"|[^\"])+)\"\\s*\\n*)', content): if regex_pattern[2].startswith(\"DOCUMENT\"): new_prefix", "-> list: \"\"\"Write report in different types depending on the file name suffix", "[(\"[url](\" + str(x) + \")\" if not pd.isna(x) else '') for x in", "x in df.line_number] df3 = pd.concat([df2, df]) df3.to_csv(report, sep=\"|\", index=False, quoting=csv.QUOTE_NONE, escapechar=\"\\\\\") return", "= os.path.join(bel_path, file) bel_files.append(bel_file_path) else: bel_files = [bel_path] return bel_files def _write_report(reports: Union[Iterable[str],", "Iterable[str] or str List of report formats or comma separated list of report", "new_file_path : str (optional) Export repaired version of file to new path. \"\"\"", "force_json: json_file = _write_odb_json(bel_path=bel_file, results=result, bel_version=bel_version) validation_results[bel_file]['json'] = json_file if tree: if result['errors']:", "\"column\", \"hint\"] df = pd.DataFrame(data=errors_or_warns_as_list_of_dicts, columns=columns) df.index += 1 if isinstance(reports, str): reports", "json_path def _create_list_bel_files(bel_path: str) -> list: \"\"\"Export all BEL files in directory as", "files. force_new_db: bool Delete current database of namespaces/values and generate a new one.", "generate a new one. Defaults to False. line_by_line: bool TODO: Write this. reports:", "bool = False, sqlalchemy_connection_str: str = None, json_file: bool = True, force_json: bool", "relationships derived from the BEL file. Defaults to False. sqlalchemy_connection_str: str Path to", "errors_or_warns_as_list_of_dicts = [x.to_dict() for x in result[report_type]] columns = [report_type[:-1] + \"_class\", \"url\",", "Optional[str] = None): \"\"\"Repair a BEL document. 
Parameters ---------- bel_script_path : str Path", "new path. \"\"\" # if evidence: # regular expression for missing continuous line", "name suffix in reports. Parameters ---------- reports : Iterable[str] or str List of", "methode. report_type : str `report_type` could be 'warnings' or 'errors'. Returns ------- list", "None, json_file: bool = True, force_json: bool = False,): \"\"\"Validate BEL script for", "'&#124;') if df.entry.dtype == np.str: df.entry = df.entry.str.replace(r'\\|', '&#124;') df.url = [(\"[url](\" +", "on the file name suffix in reports. Parameters ---------- reports : Iterable[str] or", "and JSON format. In case of no errors create a JSON file for", "False. sqlalchemy_connection_str: str Path to SQLLite database to be used for storing/looking up", "used for storing/looking up used namespaces and values. If None given, it uses", "into an e(BE:L) generated OrientDB database. Only creates the JSON file when there", "MD bel_version: {'1', '2', '2_1'} Which BEL grammar version should be used for", "format. In case of no errors create a JSON file for the import", "In case of no errors create a JSON file for the import of", "columns=cols) if df.hint.dtype == np.str: df.hint = df.hint.str.replace(r'\\|', '&#124;') if df.entry.dtype == np.str:", "dict return value of check_bel_script methode. report_type : str `report_type` could be 'warnings'", "report_paths = _write_report(reports, result, report_type='warnings') validation_results[bel_file]['reports'] = report_paths elif result['errors']: if not reports:", "for x in df.line_number] df3 = pd.concat([df2, df]) df3.to_csv(report, sep=\"|\", index=False, quoting=csv.QUOTE_NONE, escapechar=\"\\\\\")", "new_evidence_text) new_evidence_text = re.sub(r'(\\\\)(\\w)', r'\\g<2>', new_evidence_text) new_evidence_text = fill(new_evidence_text, break_long_words=False).replace(\"\\n\", \" \\\\\\n\") new_evidence", "errors. Defaults to False. 
Returns ------- dict Dictionary of file paths and results", "tree: if result['errors']: logger.error(\"Tree can not be printed because errors still exists\\n\") else:", "file to new path. \"\"\" # if evidence: # regular expression for missing", "validation_results[bel_file]['json'] = json_file if tree: if result['errors']: logger.error(\"Tree can not be printed because", "OrientDB database. Only creates the JSON file when there are no grammar or", "pd.DataFrame([['---', ] * len(cols)], columns=cols) if df.hint.dtype == np.str: df.hint = df.hint.str.replace(r'\\|', '&#124;')", "to be used for storing/looking up used namespaces and values. If None given,", "still exists\\n\") else: logger.debug(result['tree']) validation_results[bel_file]['tree'] = result['tree'] if result['warnings'] and reports: report_paths =", "engine='xlsxwriter') if report.endswith('.tsv'): df.to_csv(report, sep='\\t') if report.endswith('.json'): df.to_json(report) if report.endswith('.txt'): open(report, \"w\").write(df.to_string()) if", "file for the import of BEL network into Cytoscape: > ebel validate my.bel", "else: new_prefix = \"SET Support = \" new_evidence_text = re.sub(r\"(\\\\?[\\r\\n]+)|\\\\ \", \" \",", "error reports in Markdown and JSON format. In case of no errors create", "used namespaces and values. If None given, it uses the generated e(BE:L) database", "columns = [report_type[:-1] + \"_class\", \"url\", \"keyword\", \"entry\", \"line_number\", \"column\", \"hint\"] df =", "json_file = _write_odb_json(bel_path=bel_file, results=result, bel_version=bel_version) validation_results[bel_file]['json'] = json_file if tree: if result['errors']: logger.error(\"Tree", "str, new_file_path: Optional[str] = None): \"\"\"Repair a BEL document. Parameters ---------- bel_script_path :", "a BEL document. 
Parameters ---------- bel_script_path : str Path to the BEL file.", "report to %s\\n\" % reports) else: logger.info(\"\\n\".join([x.to_string() for x in result]) + \"\\n\")", "str: json_path = bel_path + \".json\" if int(bel_version[0]) > 1: json_tree = bel_to_json(results['tree'])", "line (\\ at the end of line) with open(bel_script_path, \"r\", encoding=\"utf-8\") as belfile:", "bel_files = _create_list_bel_files(bel_path=bel_script_path) validation_results['bel_files_checked'] = bel_files for bel_file in bel_files: # Create dict", "bool If True, will create an importable JSON file even if there are", "error_report.md,error_report.json \"\"\" validation_results = dict() if bel_script_path.startswith('\"') and bel_script_path.endswith('\"'): bel_script_path = bel_script_path[1:-1] if", "logger.info(\"Wrote report to %s\\n\" % reports) else: logger.info(\"\\n\".join([x.to_string() for x in result]) +", "\")\" if not pd.isna(x) else '') for x in df.url] url_template = \"[%s](\"", "for each BEL file processed. Examples -------- Task: Validate BEL script `my.bel` for", "'2_1'} Which BEL grammar version should be used for validating the BEL file.", "and reports: report_paths = _write_report(reports, result, report_type='warnings') validation_results[bel_file]['reports'] = report_paths elif result['errors']: if", "'&#124;') df.url = [(\"[url](\" + str(x) + \")\" if not pd.isna(x) else '')", "Defaults to False. Returns ------- dict Dictionary of file paths and results for", "and generate a new one. Defaults to False. 
line_by_line: bool TODO: Write this.", "str) -> str: json_path = bel_path + \".json\" if int(bel_version[0]) > 1: json_tree", "= dict() logger.info(f\"Processing {bel_file}\") result = check_bel_script( bel_script_path=bel_file, force_new_db=force_new_db, bel_version=bel_version, ) if json_file:", "new_content: if new_file_path: with open(new_file_path + \".diff2repaired\", \"w\") as new_file: new_file.write('\\n'.join(list(difflib.ndiff(content.split(\"\\n\"), new_content.split(\"\\n\"))))) else:", "to SQLLite database to be used for storing/looking up used namespaces and values.", "import check_bel_script_line_by_line, check_bel_script, bel_to_json logger = logging.getLogger(__name__) def validate_bel_file(bel_script_path: str, force_new_db: bool =", "df.to_excel(report) if report.endswith('.xlsx'): df.to_excel(report, engine='xlsxwriter') if report.endswith('.tsv'): df.to_csv(report, sep='\\t') if report.endswith('.json'): df.to_json(report) if", "or force_json: json_file = _write_odb_json(bel_path=bel_file, results=result, bel_version=bel_version) validation_results[bel_file]['json'] = json_file if tree: if", "df.to_csv(report, sep='\\t') if report.endswith('.json'): df.to_json(report) if report.endswith('.txt'): open(report, \"w\").write(df.to_string()) if report.endswith('.html'): df.to_html(report) if", "in df.url] url_template = \"[%s](\" + report.split(\".bel.\")[0] + \".bel?expanded=true&viewer=simple#L%s)\" df.line_number = [url_template %", "file or directory contaiing BEL files. force_new_db: bool Delete current database of namespaces/values", "correct syntax following eBNF grammar. Parameters ---------- bel_script_path: str Path to BEL file", "for bel_file in bel_files: # Create dict to be filled for individual BEL", "+ \")\" if not pd.isna(x) else '') for x in df.url] url_template =", "JSON file when there are no grammar or syntax errors. 
Defaults to True.", "== np.str: df.entry = df.entry.str.replace(r'\\|', '&#124;') df.url = [(\"[url](\" + str(x) + \")\"", "pandas as pd import ebel.database from ebel.parser import check_bel_script_line_by_line, check_bel_script, bel_to_json logger =", "filled for individual BEL files. validation_results[bel_file] = dict() logger.info(f\"Processing {bel_file}\") result = check_bel_script(", "the reports written. \"\"\" # TODO: report_type options should be constants errors_or_warns_as_list_of_dicts =", "\", \" \", regex_pattern[3].strip()) new_evidence_text = re.sub(r\"\\s{2,}\", \" \", new_evidence_text) new_evidence_text = re.sub(r'(\\\\)(\\w)',", "be used for validating the BEL file. Current available are 1.0, 2.0, and", "TSV, TXT, XLS, XLSX, JSON, HTML, MD bel_version: {'1', '2', '2_1'} Which BEL", "# if evidence: # regular expression for missing continuous line (\\ at the", "import numpy as np import pandas as pd import ebel.database from ebel.parser import", "values. If None given, it uses the generated e(BE:L) database (default). json_file: bool", "fill import numpy as np import pandas as pd import ebel.database from ebel.parser", "it uses the generated e(BE:L) database (default). json_file: bool If True, generates a", "tree of relationships derived from the BEL file. Defaults to False. sqlalchemy_connection_str: str", "if reports and reports.startswith('\"') and reports.endswith('\"'): reports = reports[1:-1] if line_by_line: # TODO:", "validation_results[bel_file]['reports'] = report_paths elif result['errors']: if not reports: logger.info('\\n'.join([x.to_string() for x in result['errors']])", "_create_list_bel_files(bel_path: str) -> list: \"\"\"Export all BEL files in directory as list. If", "------- list List of file paths for the reports written. 
\"\"\" # TODO:", "for report in reports: if report.endswith('.csv'): df.to_csv(report) if report.endswith('.xls'): df.to_excel(report) if report.endswith('.xlsx'): df.to_excel(report,", "if df.hint.dtype == np.str: df.hint = df.hint.str.replace(r'\\|', '&#124;') if df.entry.dtype == np.str: df.entry", "can be generated at once. Acceptable formats include: CSV, TSV, TXT, XLS, XLSX,", "new_file: new_file.write('\\n'.join(list(difflib.ndiff(content.split(\"\\n\"), new_content.split(\"\\n\"))))) else: with open(bel_script_path, \"w\") as output_file: output_file.write(new_content) def _write_odb_json(bel_path: str,", "repaired version of file to new path. \"\"\" # if evidence: # regular", "to BEL file or directory contaiing BEL files. force_new_db: bool Delete current database", "current database of namespaces/values and generate a new one. Defaults to False. line_by_line:", "reports: Iterable[str] or str List of file paths to write reports to. Multiple", "can be used for importing BEL relationships into an e(BE:L) generated OrientDB database.", "repair_bel_file(bel_script_path: str, new_file_path: Optional[str] = None): \"\"\"Repair a BEL document. Parameters ---------- bel_script_path", "continuous line (\\ at the end of line) with open(bel_script_path, \"r\", encoding=\"utf-8\") as", "syntax following eBNF grammar. Parameters ---------- bel_script_path: str Path to BEL file or", "not working result = check_bel_script_line_by_line(bel_script_path, error_report_file_path=reports, bel_version=bel_version) if reports: logger.info(\"Wrote report to %s\\n\"", "df.url] url_template = \"[%s](\" + report.split(\".bel.\")[0] + \".bel?expanded=true&viewer=simple#L%s)\" df.line_number = [url_template % (x,", "generates a JSON file that can be used for importing BEL relationships into", "new_prefix = \"SET Support = \" new_evidence_text = re.sub(r\"(\\\\?[\\r\\n]+)|\\\\ \", \" \", regex_pattern[3].strip())", "generated e(BE:L) database (default). 
json_file: bool If True, generates a JSON file that", "network into Cytoscape: > ebel validate my.bel -v 2 -r error_report.md,error_report.json \"\"\" validation_results", "import difflib import logging from typing import Iterable, Union, Optional from textwrap import", "np import pandas as pd import ebel.database from ebel.parser import check_bel_script_line_by_line, check_bel_script, bel_to_json", "---------- bel_script_path: str Path to BEL file or directory contaiing BEL files. force_new_db:", "of relationships derived from the BEL file. Defaults to False. sqlalchemy_connection_str: str Path", "(\\ at the end of line) with open(bel_script_path, \"r\", encoding=\"utf-8\") as belfile: content", "logger.info(f\"Processing {bel_file}\") result = check_bel_script( bel_script_path=bel_file, force_new_db=force_new_db, bel_version=bel_version, ) if json_file: if not", "of file paths for the reports written. \"\"\" # TODO: report_type options should", "Support = \" new_evidence_text = re.sub(r\"(\\\\?[\\r\\n]+)|\\\\ \", \" \", regex_pattern[3].strip()) new_evidence_text = re.sub(r\"\\s{2,}\",", "version. tree: bool Generates a tree of relationships derived from the BEL file.", "options should be constants errors_or_warns_as_list_of_dicts = [x.to_dict() for x in result[report_type]] columns =", "re import csv import difflib import logging from typing import Iterable, Union, Optional", "result['errors'] or force_json: json_file = _write_odb_json(bel_path=bel_file, results=result, bel_version=bel_version) validation_results[bel_file]['json'] = json_file if tree:", "new_evidence_text = re.sub(r'(\\\\)(\\w)', r'\\g<2>', new_evidence_text) new_evidence_text = fill(new_evidence_text, break_long_words=False).replace(\"\\n\", \" \\\\\\n\") new_evidence =", "storing/looking up used namespaces and values. 
If None given, it uses the generated", "else: if sqlalchemy_connection_str: ebel.database.set_connection(sqlalchemy_connection_str) bel_files = _create_list_bel_files(bel_path=bel_script_path) validation_results['bel_files_checked'] = bel_files for bel_file in", "\".bel?expanded=true&viewer=simple#L%s)\" df.line_number = [url_template % (x, x) for x in df.line_number] df3 =", "not reports: logger.info('\\n'.join([x.to_string() for x in result['errors']]) + \"\\n\") else: _write_report(reports, result, report_type='errors')", "my.bel -v 2 -r error_report.md,error_report.json \"\"\" validation_results = dict() if bel_script_path.startswith('\"') and bel_script_path.endswith('\"'):", "List of report formats or comma separated list of report file names. result", "of file to new path. \"\"\" # if evidence: # regular expression for", "when there are no grammar or syntax errors. Defaults to True. force_json: bool", "following eBNF grammar. Parameters ---------- bel_script_path: str Path to BEL file or directory", "new_content.replace(regex_pattern[0], new_evidence) if content != new_content: if new_file_path: with open(new_file_path + \".diff2repaired\", \"w\")", "file. Current available are 1.0, 2.0, and 2.1. Defaults to the most recent", "= result['tree'] if result['warnings'] and reports: report_paths = _write_report(reports, result, report_type='warnings') validation_results[bel_file]['reports'] =", "BEL files. force_new_db: bool Delete current database of namespaces/values and generate a new", "to new path. \"\"\" # if evidence: # regular expression for missing continuous", "logging.getLogger(__name__) def validate_bel_file(bel_script_path: str, force_new_db: bool = False, line_by_line: bool = False, reports:", "2.1. Defaults to the most recent version. 
tree: bool Generates a tree of", "= \" new_evidence_text = re.sub(r\"(\\\\?[\\r\\n]+)|\\\\ \", \" \", regex_pattern[3].strip()) new_evidence_text = re.sub(r\"\\s{2,}\", \"", "file even if there are namespace/value errors. Defaults to False. Returns ------- dict", "x) for x in df.line_number] df3 = pd.concat([df2, df]) df3.to_csv(report, sep=\"|\", index=False, quoting=csv.QUOTE_NONE,", "\"SET Support = \" new_evidence_text = re.sub(r\"(\\\\?[\\r\\n]+)|\\\\ \", \" \", regex_pattern[3].strip()) new_evidence_text =", "def _write_report(reports: Union[Iterable[str], str], result: dict, report_type: str) -> list: \"\"\"Write report in", "validate_bel_file(bel_script_path: str, force_new_db: bool = False, line_by_line: bool = False, reports: Union[Iterable[str], str]", "content): if regex_pattern[2].startswith(\"DOCUMENT\"): new_prefix = \"SET DOCUMENT Description = \" else: new_prefix =", "TODO: report_type options should be constants errors_or_warns_as_list_of_dicts = [x.to_dict() for x in result[report_type]]", "new one. Defaults to False. line_by_line: bool TODO: Write this. reports: Iterable[str] or", "\"\\n\") else: if sqlalchemy_connection_str: ebel.database.set_connection(sqlalchemy_connection_str) bel_files = _create_list_bel_files(bel_path=bel_script_path) validation_results['bel_files_checked'] = bel_files for bel_file", "value of check_bel_script methode. report_type : str `report_type` could be 'warnings' or 'errors'.", "BEL script for correct syntax following eBNF grammar. Parameters ---------- bel_script_path: str Path", "bool TODO: Write this. 
reports: Iterable[str] or str List of file paths to", "if df.entry.dtype == np.str: df.entry = df.entry.str.replace(r'\\|', '&#124;') df.url = [(\"[url](\" + str(x)", "2 -r error_report.md,error_report.json \"\"\" validation_results = dict() if bel_script_path.startswith('\"') and bel_script_path.endswith('\"'): bel_script_path =", "[bel_path] return bel_files def _write_report(reports: Union[Iterable[str], str], result: dict, report_type: str) -> list:", "for BEL syntax 2.0, create error reports in Markdown and JSON format. In", "new_evidence_text = re.sub(r\"\\s{2,}\", \" \", new_evidence_text) new_evidence_text = re.sub(r'(\\\\)(\\w)', r'\\g<2>', new_evidence_text) new_evidence_text =", "Defaults to False. sqlalchemy_connection_str: str Path to SQLLite database to be used for", "files in directory as list. If single file is passed, returns a list", "_write_report(reports, result, report_type='warnings') validation_results[bel_file]['reports'] = report_paths elif result['errors']: if not reports: logger.info('\\n'.join([x.to_string() for", "pd import ebel.database from ebel.parser import check_bel_script_line_by_line, check_bel_script, bel_to_json logger = logging.getLogger(__name__) def", "_write_report(reports: Union[Iterable[str], str], result: dict, report_type: str) -> list: \"\"\"Write report in different", "path.\"\"\" if os.path.isdir(bel_path): bel_files = [] for file in os.listdir(bel_path): if file.endswith(\".bel\"): bel_file_path", "= new_content.replace(regex_pattern[0], new_evidence) if content != new_content: if new_file_path: with open(new_file_path + \".diff2repaired\",", "\"[%s](\" + report.split(\".bel.\")[0] + \".bel?expanded=true&viewer=simple#L%s)\" df.line_number = [url_template % (x, x) for x", "isinstance(reports, str): reports = reports.split(\",\") for report in reports: if report.endswith('.csv'): df.to_csv(report) if", "bel_files: # Create dict to be filled for individual BEL files. 
validation_results[bel_file] =", "bel_script_path: str Path to BEL file or directory contaiing BEL files. force_new_db: bool", "into Cytoscape: > ebel validate my.bel -v 2 -r error_report.md,error_report.json \"\"\" validation_results =", "reports : Iterable[str] or str List of report formats or comma separated list", "paths to write reports to. Multiple formats of the report can be generated", "\"\"\" validation_results = dict() if bel_script_path.startswith('\"') and bel_script_path.endswith('\"'): bel_script_path = bel_script_path[1:-1] if reports", "+ report.split(\".bel.\")[0] + \".bel?expanded=true&viewer=simple#L%s)\" df.line_number = [url_template % (x, x) for x in", "str, force_new_db: bool = False, line_by_line: bool = False, reports: Union[Iterable[str], str] =", "to the most recent version. tree: bool Generates a tree of relationships derived", "df.to_csv(report) if report.endswith('.xls'): df.to_excel(report) if report.endswith('.xlsx'): df.to_excel(report, engine='xlsxwriter') if report.endswith('.tsv'): df.to_csv(report, sep='\\t') if", "pd.DataFrame(data=errors_or_warns_as_list_of_dicts, columns=columns) df.index += 1 if isinstance(reports, str): reports = reports.split(\",\") for report", "XLSX, JSON, HTML, MD bel_version: {'1', '2', '2_1'} Which BEL grammar version should", "sep='\\t') if report.endswith('.json'): df.to_json(report) if report.endswith('.txt'): open(report, \"w\").write(df.to_string()) if report.endswith('.html'): df.to_html(report) if report.endswith('.md'):", "= reports.split(\",\") for report in reports: if report.endswith('.csv'): df.to_csv(report) if report.endswith('.xls'): df.to_excel(report) if", "logger.info(\"\\n\".join([x.to_string() for x in result]) + \"\\n\") else: if sqlalchemy_connection_str: ebel.database.set_connection(sqlalchemy_connection_str) bel_files =", "contaiing BEL files. force_new_db: bool Delete current database of namespaces/values and generate a", "all BEL files in directory as list. 
If single file is passed, returns", "= None, json_file: bool = True, force_json: bool = False,): \"\"\"Validate BEL script", "else: _write_report(reports, result, report_type='errors') def repair_bel_file(bel_script_path: str, new_file_path: Optional[str] = None): \"\"\"Repair a", "report.endswith('.md'): cols = df.columns df2 = pd.DataFrame([['---', ] * len(cols)], columns=cols) if df.hint.dtype", "write reports to. Multiple formats of the report can be generated at once.", "report_paths elif result['errors']: if not reports: logger.info('\\n'.join([x.to_string() for x in result['errors']]) + \"\\n\")", ": Iterable[str] or str List of report formats or comma separated list of", "return json_path def _create_list_bel_files(bel_path: str) -> list: \"\"\"Export all BEL files in directory", "\" \\\\\\n\") new_evidence = new_prefix + '\"' + new_evidence_text + '\"\\n\\n' new_content =", "report_type: str) -> list: \"\"\"Write report in different types depending on the file", "result['errors']: if not reports: logger.info('\\n'.join([x.to_string() for x in result['errors']]) + \"\\n\") else: _write_report(reports,", "new_evidence) if content != new_content: if new_file_path: with open(new_file_path + \".diff2repaired\", \"w\") as", "= \"SET Support = \" new_evidence_text = re.sub(r\"(\\\\?[\\r\\n]+)|\\\\ \", \" \", regex_pattern[3].strip()) new_evidence_text", "directory as list. If single file is passed, returns a list with that", "regex_pattern[2].startswith(\"DOCUMENT\"): new_prefix = \"SET DOCUMENT Description = \" else: new_prefix = \"SET Support", "df.line_number = [url_template % (x, x) for x in df.line_number] df3 = pd.concat([df2,", "names. result : dict return value of check_bel_script methode. report_type : str `report_type`", "directory contaiing BEL files. 
force_new_db: bool Delete current database of namespaces/values and generate", "= False, line_by_line: bool = False, reports: Union[Iterable[str], str] = None, bel_version: str", "%s\\n\" % reports) else: logger.info(\"\\n\".join([x.to_string() for x in result]) + \"\\n\") else: if", "error_report_file_path=reports, bel_version=bel_version) if reports: logger.info(\"Wrote report to %s\\n\" % reports) else: logger.info(\"\\n\".join([x.to_string() for", "bel_script_path=bel_file, force_new_db=force_new_db, bel_version=bel_version, ) if json_file: if not result['errors'] or force_json: json_file =", "# TODO: report_type options should be constants errors_or_warns_as_list_of_dicts = [x.to_dict() for x in", "reports = reports.split(\",\") for report in reports: if report.endswith('.csv'): df.to_csv(report) if report.endswith('.xls'): df.to_excel(report)", "= bel_files for bel_file in bel_files: # Create dict to be filled for", "in os.listdir(bel_path): if file.endswith(\".bel\"): bel_file_path = os.path.join(bel_path, file) bel_files.append(bel_file_path) else: bel_files = [bel_path]", "json_path = bel_path + \".json\" if int(bel_version[0]) > 1: json_tree = bel_to_json(results['tree']) open(json_path,", "= False, sqlalchemy_connection_str: str = None, json_file: bool = True, force_json: bool =", "file) bel_files.append(bel_file_path) else: bel_files = [bel_path] return bel_files def _write_report(reports: Union[Iterable[str], str], result:", "database to be used for storing/looking up used namespaces and values. 
If None", "file in os.listdir(bel_path): if file.endswith(\".bel\"): bel_file_path = os.path.join(bel_path, file) bel_files.append(bel_file_path) else: bel_files =", "is passed, returns a list with that path.\"\"\" if os.path.isdir(bel_path): bel_files = []", "regex_pattern in re.findall(r'\\n((SET\\s+(DOCUMENT\\s+Description|Evidence|SupportingText)' r'\\s*=\\s*)\"(((?<=\\\\)\"|[^\"])+)\"\\s*\\n*)', content): if regex_pattern[2].startswith(\"DOCUMENT\"): new_prefix = \"SET DOCUMENT Description =", "or str List of report formats or comma separated list of report file", "to False. line_by_line: bool TODO: Write this. reports: Iterable[str] or str List of", "sqlalchemy_connection_str: str Path to SQLLite database to be used for storing/looking up used", "if report.endswith('.csv'): df.to_csv(report) if report.endswith('.xls'): df.to_excel(report) if report.endswith('.xlsx'): df.to_excel(report, engine='xlsxwriter') if report.endswith('.tsv'): df.to_csv(report,", "if not result['errors'] or force_json: json_file = _write_odb_json(bel_path=bel_file, results=result, bel_version=bel_version) validation_results[bel_file]['json'] = json_file", "= [x.to_dict() for x in result[report_type]] columns = [report_type[:-1] + \"_class\", \"url\", \"keyword\",", "Delete current database of namespaces/values and generate a new one. Defaults to False.", "= re.sub(r\"\\s{2,}\", \" \", new_evidence_text) new_evidence_text = re.sub(r'(\\\\)(\\w)', r'\\g<2>', new_evidence_text) new_evidence_text = fill(new_evidence_text,", "None, bel_version: str = '2_1', tree: bool = False, sqlalchemy_connection_str: str = None,", "str = None, json_file: bool = True, force_json: bool = False,): \"\"\"Validate BEL", "file when there are no grammar or syntax errors. Defaults to True. 
force_json:", "reports.startswith('\"') and reports.endswith('\"'): reports = reports[1:-1] if line_by_line: # TODO: This is perhaps", "x in result[report_type]] columns = [report_type[:-1] + \"_class\", \"url\", \"keyword\", \"entry\", \"line_number\", \"column\",", "for individual BEL files. validation_results[bel_file] = dict() logger.info(f\"Processing {bel_file}\") result = check_bel_script( bel_script_path=bel_file,", "database (default). json_file: bool If True, generates a JSON file that can be", "= [bel_path] return bel_files def _write_report(reports: Union[Iterable[str], str], result: dict, report_type: str) ->", "if json_file: if not result['errors'] or force_json: json_file = _write_odb_json(bel_path=bel_file, results=result, bel_version=bel_version) validation_results[bel_file]['json']", "suffix in reports. Parameters ---------- reports : Iterable[str] or str List of report", "+ \".bel?expanded=true&viewer=simple#L%s)\" df.line_number = [url_template % (x, x) for x in df.line_number] df3", "columns=columns) df.index += 1 if isinstance(reports, str): reports = reports.split(\",\") for report in", "Union[Iterable[str], str] = None, bel_version: str = '2_1', tree: bool = False, sqlalchemy_connection_str:", "_write_report(reports, result, report_type='errors') def repair_bel_file(bel_script_path: str, new_file_path: Optional[str] = None): \"\"\"Repair a BEL", "in re.findall(r'\\n((SET\\s+(DOCUMENT\\s+Description|Evidence|SupportingText)' r'\\s*=\\s*)\"(((?<=\\\\)\"|[^\"])+)\"\\s*\\n*)', content): if regex_pattern[2].startswith(\"DOCUMENT\"): new_prefix = \"SET DOCUMENT Description = \"", "processed. Examples -------- Task: Validate BEL script `my.bel` for BEL syntax 2.0, create", "if result['warnings'] and reports: report_paths = _write_report(reports, result, report_type='warnings') validation_results[bel_file]['reports'] = report_paths elif", "bel_file in bel_files: # Create dict to be filled for individual BEL files.", "namespaces and values. 
If None given, it uses the generated e(BE:L) database (default).", "Parameters ---------- reports : Iterable[str] or str List of report formats or comma", "file that can be used for importing BEL relationships into an e(BE:L) generated", "namespace/value errors. Defaults to False. Returns ------- dict Dictionary of file paths and", "str `report_type` could be 'warnings' or 'errors'. Returns ------- list List of file", "= belfile.read() new_content = content for regex_pattern in re.findall(r'\\n((SET\\s+(DOCUMENT\\s+Description|Evidence|SupportingText)' r'\\s*=\\s*)\"(((?<=\\\\)\"|[^\"])+)\"\\s*\\n*)', content): if regex_pattern[2].startswith(\"DOCUMENT\"):", "expression for missing continuous line (\\ at the end of line) with open(bel_script_path,", "cols = df.columns df2 = pd.DataFrame([['---', ] * len(cols)], columns=cols) if df.hint.dtype ==", "in different types depending on the file name suffix in reports. Parameters ----------", "result[report_type]] columns = [report_type[:-1] + \"_class\", \"url\", \"keyword\", \"entry\", \"line_number\", \"column\", \"hint\"] df", "os.path.join(bel_path, file) bel_files.append(bel_file_path) else: bel_files = [bel_path] return bel_files def _write_report(reports: Union[Iterable[str], str],", "False. Returns ------- dict Dictionary of file paths and results for each BEL", "check_bel_script methode. report_type : str `report_type` could be 'warnings' or 'errors'. Returns -------", "results=result, bel_version=bel_version) validation_results[bel_file]['json'] = json_file if tree: if result['errors']: logger.error(\"Tree can not be", "to %s\\n\" % reports) else: logger.info(\"\\n\".join([x.to_string() for x in result]) + \"\\n\") else:", "result['errors']: logger.error(\"Tree can not be printed because errors still exists\\n\") else: logger.debug(result['tree']) validation_results[bel_file]['tree']", ": dict return value of check_bel_script methode. 
report_type : str `report_type` could be", "Dictionary of file paths and results for each BEL file processed. Examples --------", "Optional from textwrap import fill import numpy as np import pandas as pd", "the end of line) with open(bel_script_path, \"r\", encoding=\"utf-8\") as belfile: content = belfile.read()", "for x in result['errors']]) + \"\\n\") else: _write_report(reports, result, report_type='errors') def repair_bel_file(bel_script_path: str,", "new_prefix + '\"' + new_evidence_text + '\"\\n\\n' new_content = new_content.replace(regex_pattern[0], new_evidence) if content", "for the reports written. \"\"\" # TODO: report_type options should be constants errors_or_warns_as_list_of_dicts", "SQLLite database to be used for storing/looking up used namespaces and values. If", "_write_odb_json(bel_path=bel_file, results=result, bel_version=bel_version) validation_results[bel_file]['json'] = json_file if tree: if result['errors']: logger.error(\"Tree can not", "BEL script `my.bel` for BEL syntax 2.0, create error reports in Markdown and", "\"\"\"Repair a BEL document. Parameters ---------- bel_script_path : str Path to the BEL", "most recent version. tree: bool Generates a tree of relationships derived from the", "from the BEL file. Defaults to False. sqlalchemy_connection_str: str Path to SQLLite database", "None given, it uses the generated e(BE:L) database (default). json_file: bool If True,", "[x.to_dict() for x in result[report_type]] columns = [report_type[:-1] + \"_class\", \"url\", \"keyword\", \"entry\",", "= df.hint.str.replace(r'\\|', '&#124;') if df.entry.dtype == np.str: df.entry = df.entry.str.replace(r'\\|', '&#124;') df.url =", "result]) + \"\\n\") else: if sqlalchemy_connection_str: ebel.database.set_connection(sqlalchemy_connection_str) bel_files = _create_list_bel_files(bel_path=bel_script_path) validation_results['bel_files_checked'] = bel_files", "generated OrientDB database. 
Only creates the JSON file when there are no grammar", "errors still exists\\n\") else: logger.debug(result['tree']) validation_results[bel_file]['tree'] = result['tree'] if result['warnings'] and reports: report_paths", "os.path.isdir(bel_path): bel_files = [] for file in os.listdir(bel_path): if file.endswith(\".bel\"): bel_file_path = os.path.join(bel_path,", "\"\"\" # if evidence: # regular expression for missing continuous line (\\ at", "new_content = content for regex_pattern in re.findall(r'\\n((SET\\s+(DOCUMENT\\s+Description|Evidence|SupportingText)' r'\\s*=\\s*)\"(((?<=\\\\)\"|[^\"])+)\"\\s*\\n*)', content): if regex_pattern[2].startswith(\"DOCUMENT\"): new_prefix =", "r'\\g<2>', new_evidence_text) new_evidence_text = fill(new_evidence_text, break_long_words=False).replace(\"\\n\", \" \\\\\\n\") new_evidence = new_prefix + '\"'", "written. \"\"\" # TODO: report_type options should be constants errors_or_warns_as_list_of_dicts = [x.to_dict() for", "reports: if report.endswith('.csv'): df.to_csv(report) if report.endswith('.xls'): df.to_excel(report) if report.endswith('.xlsx'): df.to_excel(report, engine='xlsxwriter') if report.endswith('.tsv'):", "a JSON file for the import of BEL network into Cytoscape: > ebel", "= fill(new_evidence_text, break_long_words=False).replace(\"\\n\", \" \\\\\\n\") new_evidence = new_prefix + '\"' + new_evidence_text +", "else: logger.info(\"\\n\".join([x.to_string() for x in result]) + \"\\n\") else: if sqlalchemy_connection_str: ebel.database.set_connection(sqlalchemy_connection_str) bel_files", "str] = None, bel_version: str = '2_1', tree: bool = False, sqlalchemy_connection_str: str", "file paths and results for each BEL file processed. Examples -------- Task: Validate", "be filled for individual BEL files. validation_results[bel_file] = dict() logger.info(f\"Processing {bel_file}\") result =", "individual BEL files. 
validation_results[bel_file] = dict() logger.info(f\"Processing {bel_file}\") result = check_bel_script( bel_script_path=bel_file, force_new_db=force_new_db,", "list List of file paths for the reports written. \"\"\" # TODO: report_type", "Union[Iterable[str], str], result: dict, report_type: str) -> list: \"\"\"Write report in different types", "if not reports: logger.info('\\n'.join([x.to_string() for x in result['errors']]) + \"\\n\") else: _write_report(reports, result,", "JSON file even if there are namespace/value errors. Defaults to False. Returns -------", "JSON file that can be used for importing BEL relationships into an e(BE:L)", "Examples -------- Task: Validate BEL script `my.bel` for BEL syntax 2.0, create error", "False,): \"\"\"Validate BEL script for correct syntax following eBNF grammar. Parameters ---------- bel_script_path:", "as belfile: content = belfile.read() new_content = content for regex_pattern in re.findall(r'\\n((SET\\s+(DOCUMENT\\s+Description|Evidence|SupportingText)' r'\\s*=\\s*)\"(((?<=\\\\)\"|[^\"])+)\"\\s*\\n*)',", "one. Defaults to False. line_by_line: bool TODO: Write this. reports: Iterable[str] or str", "# Create dict to be filled for individual BEL files. validation_results[bel_file] = dict()", "from ebel.parser import check_bel_script_line_by_line, check_bel_script, bel_to_json logger = logging.getLogger(__name__) def validate_bel_file(bel_script_path: str, force_new_db:", "or str List of file paths to write reports to. Multiple formats of", "open(bel_script_path, \"r\", encoding=\"utf-8\") as belfile: content = belfile.read() new_content = content for regex_pattern", "DOCUMENT Description = \" else: new_prefix = \"SET Support = \" new_evidence_text =", "importable JSON file even if there are namespace/value errors. Defaults to False. 
Returns", "content != new_content: if new_file_path: with open(new_file_path + \".diff2repaired\", \"w\") as new_file: new_file.write('\\n'.join(list(difflib.ndiff(content.split(\"\\n\"),", "np.str: df.hint = df.hint.str.replace(r'\\|', '&#124;') if df.entry.dtype == np.str: df.entry = df.entry.str.replace(r'\\|', '&#124;')", "the most recent version. tree: bool Generates a tree of relationships derived from", "in df.line_number] df3 = pd.concat([df2, df]) df3.to_csv(report, sep=\"|\", index=False, quoting=csv.QUOTE_NONE, escapechar=\"\\\\\") return reports", "create an importable JSON file even if there are namespace/value errors. Defaults to", "line) with open(bel_script_path, \"r\", encoding=\"utf-8\") as belfile: content = belfile.read() new_content = content", "bel_files.append(bel_file_path) else: bel_files = [bel_path] return bel_files def _write_report(reports: Union[Iterable[str], str], result: dict,", "os.listdir(bel_path): if file.endswith(\".bel\"): bel_file_path = os.path.join(bel_path, file) bel_files.append(bel_file_path) else: bel_files = [bel_path] return", "BEL file. Defaults to False. sqlalchemy_connection_str: str Path to SQLLite database to be", "at the end of line) with open(bel_script_path, \"r\", encoding=\"utf-8\") as belfile: content =", "x in result]) + \"\\n\") else: if sqlalchemy_connection_str: ebel.database.set_connection(sqlalchemy_connection_str) bel_files = _create_list_bel_files(bel_path=bel_script_path) validation_results['bel_files_checked']", "force_json: bool If True, will create an importable JSON file even if there", "Task: Validate BEL script `my.bel` for BEL syntax 2.0, create error reports in", "validation_results[bel_file]['tree'] = result['tree'] if result['warnings'] and reports: report_paths = _write_report(reports, result, report_type='warnings') validation_results[bel_file]['reports']", "typing import Iterable, Union, Optional from textwrap import fill import numpy as np", "line_by_line: bool TODO: Write this. 
reports: Iterable[str] or str List of file paths", "or 'errors'. Returns ------- list List of file paths for the reports written.", "tree: bool Generates a tree of relationships derived from the BEL file. Defaults", "can not be printed because errors still exists\\n\") else: logger.debug(result['tree']) validation_results[bel_file]['tree'] = result['tree']", "for importing BEL relationships into an e(BE:L) generated OrientDB database. Only creates the", "formats or comma separated list of report file names. result : dict return", "report file names. result : dict return value of check_bel_script methode. report_type :", "as list. If single file is passed, returns a list with that path.\"\"\"", "new_file_path: with open(new_file_path + \".diff2repaired\", \"w\") as new_file: new_file.write('\\n'.join(list(difflib.ndiff(content.split(\"\\n\"), new_content.split(\"\\n\"))))) else: with open(bel_script_path,", "\".json\" if int(bel_version[0]) > 1: json_tree = bel_to_json(results['tree']) open(json_path, \"w\").write(json_tree) return json_path def", "1.0, 2.0, and 2.1. Defaults to the most recent version. tree: bool Generates", "break_long_words=False).replace(\"\\n\", \" \\\\\\n\") new_evidence = new_prefix + '\"' + new_evidence_text + '\"\\n\\n' new_content", "recent version. tree: bool Generates a tree of relationships derived from the BEL", "an e(BE:L) generated OrientDB database. Only creates the JSON file when there are", "+ \".diff2repaired\", \"w\") as new_file: new_file.write('\\n'.join(list(difflib.ndiff(content.split(\"\\n\"), new_content.split(\"\\n\"))))) else: with open(bel_script_path, \"w\") as output_file:", "could be 'warnings' or 'errors'. Returns ------- list List of file paths for", "If single file is passed, returns a list with that path.\"\"\" if os.path.isdir(bel_path):", "be printed because errors still exists\\n\") else: logger.debug(result['tree']) validation_results[bel_file]['tree'] = result['tree'] if result['warnings']", "and 2.1. 
Defaults to the most recent version. tree: bool Generates a tree", "result['tree'] if result['warnings'] and reports: report_paths = _write_report(reports, result, report_type='warnings') validation_results[bel_file]['reports'] = report_paths", "True, force_json: bool = False,): \"\"\"Validate BEL script for correct syntax following eBNF", "perhaps not working result = check_bel_script_line_by_line(bel_script_path, error_report_file_path=reports, bel_version=bel_version) if reports: logger.info(\"Wrote report to", "\"w\") as new_file: new_file.write('\\n'.join(list(difflib.ndiff(content.split(\"\\n\"), new_content.split(\"\\n\"))))) else: with open(bel_script_path, \"w\") as output_file: output_file.write(new_content) def", "of report file names. result : dict return value of check_bel_script methode. report_type", "as pd import ebel.database from ebel.parser import check_bel_script_line_by_line, check_bel_script, bel_to_json logger = logging.getLogger(__name__)", "tree: bool = False, sqlalchemy_connection_str: str = None, json_file: bool = True, force_json:", ") if json_file: if not result['errors'] or force_json: json_file = _write_odb_json(bel_path=bel_file, results=result, bel_version=bel_version)", "XLS, XLSX, JSON, HTML, MD bel_version: {'1', '2', '2_1'} Which BEL grammar version", "Path to BEL file or directory contaiing BEL files. force_new_db: bool Delete current", "be used for storing/looking up used namespaces and values. 
If None given, it", "new_evidence = new_prefix + '\"' + new_evidence_text + '\"\\n\\n' new_content = new_content.replace(regex_pattern[0], new_evidence)", "str Path to SQLLite database to be used for storing/looking up used namespaces", "report.endswith('.json'): df.to_json(report) if report.endswith('.txt'): open(report, \"w\").write(df.to_string()) if report.endswith('.html'): df.to_html(report) if report.endswith('.md'): cols =", "new_evidence_text + '\"\\n\\n' new_content = new_content.replace(regex_pattern[0], new_evidence) if content != new_content: if new_file_path:", "files. validation_results[bel_file] = dict() logger.info(f\"Processing {bel_file}\") result = check_bel_script( bel_script_path=bel_file, force_new_db=force_new_db, bel_version=bel_version, )", "re.sub(r'(\\\\)(\\w)', r'\\g<2>', new_evidence_text) new_evidence_text = fill(new_evidence_text, break_long_words=False).replace(\"\\n\", \" \\\\\\n\") new_evidence = new_prefix +", "1: json_tree = bel_to_json(results['tree']) open(json_path, \"w\").write(json_tree) return json_path def _create_list_bel_files(bel_path: str) -> list:", "Generates a tree of relationships derived from the BEL file. Defaults to False.", "if os.path.isdir(bel_path): bel_files = [] for file in os.listdir(bel_path): if file.endswith(\".bel\"): bel_file_path =", "for x in df.url] url_template = \"[%s](\" + report.split(\".bel.\")[0] + \".bel?expanded=true&viewer=simple#L%s)\" df.line_number =", "HTML, MD bel_version: {'1', '2', '2_1'} Which BEL grammar version should be used", "+ \"\\n\") else: _write_report(reports, result, report_type='errors') def repair_bel_file(bel_script_path: str, new_file_path: Optional[str] = None):", "in reports. 
Parameters ---------- reports : Iterable[str] or str List of report formats", "import Iterable, Union, Optional from textwrap import fill import numpy as np import", "ebel.parser import check_bel_script_line_by_line, check_bel_script, bel_to_json logger = logging.getLogger(__name__) def validate_bel_file(bel_script_path: str, force_new_db: bool", "---------- reports : Iterable[str] or str List of report formats or comma separated", "int(bel_version[0]) > 1: json_tree = bel_to_json(results['tree']) open(json_path, \"w\").write(json_tree) return json_path def _create_list_bel_files(bel_path: str)", "import os import re import csv import difflib import logging from typing import", "no grammar or syntax errors. Defaults to True. force_json: bool If True, will", "paths for the reports written. \"\"\" # TODO: report_type options should be constants", "report.endswith('.xlsx'): df.to_excel(report, engine='xlsxwriter') if report.endswith('.tsv'): df.to_csv(report, sep='\\t') if report.endswith('.json'): df.to_json(report) if report.endswith('.txt'): open(report,", "namespaces/values and generate a new one. Defaults to False. line_by_line: bool TODO: Write", "results for each BEL file processed. Examples -------- Task: Validate BEL script `my.bel`", "if tree: if result['errors']: logger.error(\"Tree can not be printed because errors still exists\\n\")", "(x, x) for x in df.line_number] df3 = pd.concat([df2, df]) df3.to_csv(report, sep=\"|\", index=False,", "if report.endswith('.html'): df.to_html(report) if report.endswith('.md'): cols = df.columns df2 = pd.DataFrame([['---', ] *", "encoding=\"utf-8\") as belfile: content = belfile.read() new_content = content for regex_pattern in re.findall(r'\\n((SET\\s+(DOCUMENT\\s+Description|Evidence|SupportingText)'", "syntax 2.0, create error reports in Markdown and JSON format. In case of", "for validating a BEL file.\"\"\" import os import re import csv import difflib", "the file name suffix in reports. 
Parameters ---------- reports : Iterable[str] or str", "bel_file_path = os.path.join(bel_path, file) bel_files.append(bel_file_path) else: bel_files = [bel_path] return bel_files def _write_report(reports:", "grammar version should be used for validating the BEL file. Current available are", "of the report can be generated at once. Acceptable formats include: CSV, TSV,", "Defaults to True. force_json: bool If True, will create an importable JSON file", "bel_files for bel_file in bel_files: # Create dict to be filled for individual", "reports[1:-1] if line_by_line: # TODO: This is perhaps not working result = check_bel_script_line_by_line(bel_script_path,", "if file.endswith(\".bel\"): bel_file_path = os.path.join(bel_path, file) bel_files.append(bel_file_path) else: bel_files = [bel_path] return bel_files", "list with that path.\"\"\" if os.path.isdir(bel_path): bel_files = [] for file in os.listdir(bel_path):", "new_evidence_text) new_evidence_text = fill(new_evidence_text, break_long_words=False).replace(\"\\n\", \" \\\\\\n\") new_evidence = new_prefix + '\"' +", "regular expression for missing continuous line (\\ at the end of line) with", "with open(bel_script_path, \"r\", encoding=\"utf-8\") as belfile: content = belfile.read() new_content = content for", "1 if isinstance(reports, str): reports = reports.split(\",\") for report in reports: if report.endswith('.csv'):", "if evidence: # regular expression for missing continuous line (\\ at the end", "json_file: bool If True, generates a JSON file that can be used for", "relationships into an e(BE:L) generated OrientDB database. Only creates the JSON file when", "+ new_evidence_text + '\"\\n\\n' new_content = new_content.replace(regex_pattern[0], new_evidence) if content != new_content: if", "\"\\n\") else: _write_report(reports, result, report_type='errors') def repair_bel_file(bel_script_path: str, new_file_path: Optional[str] = None): \"\"\"Repair", "BEL files in directory as list. 
If single file is passed, returns a", "return value of check_bel_script methode. report_type : str `report_type` could be 'warnings' or", "\"line_number\", \"column\", \"hint\"] df = pd.DataFrame(data=errors_or_warns_as_list_of_dicts, columns=columns) df.index += 1 if isinstance(reports, str):", "= _write_odb_json(bel_path=bel_file, results=result, bel_version=bel_version) validation_results[bel_file]['json'] = json_file if tree: if result['errors']: logger.error(\"Tree can", "'warnings' or 'errors'. Returns ------- list List of file paths for the reports", "\"hint\"] df = pd.DataFrame(data=errors_or_warns_as_list_of_dicts, columns=columns) df.index += 1 if isinstance(reports, str): reports =", "str List of report formats or comma separated list of report file names.", "result : dict return value of check_bel_script methode. report_type : str `report_type` could", "df.to_html(report) if report.endswith('.md'): cols = df.columns df2 = pd.DataFrame([['---', ] * len(cols)], columns=cols)", "even if there are namespace/value errors. Defaults to False. Returns ------- dict Dictionary", "import re import csv import difflib import logging from typing import Iterable, Union,", "are 1.0, 2.0, and 2.1. Defaults to the most recent version. 
tree: bool", "\\\\\\n\") new_evidence = new_prefix + '\"' + new_evidence_text + '\"\\n\\n' new_content = new_content.replace(regex_pattern[0],", "and reports.startswith('\"') and reports.endswith('\"'): reports = reports[1:-1] if line_by_line: # TODO: This is", "exists\\n\") else: logger.debug(result['tree']) validation_results[bel_file]['tree'] = result['tree'] if result['warnings'] and reports: report_paths = _write_report(reports,", "'\"' + new_evidence_text + '\"\\n\\n' new_content = new_content.replace(regex_pattern[0], new_evidence) if content != new_content:", "= [(\"[url](\" + str(x) + \")\" if not pd.isna(x) else '') for x", "re.sub(r\"(\\\\?[\\r\\n]+)|\\\\ \", \" \", regex_pattern[3].strip()) new_evidence_text = re.sub(r\"\\s{2,}\", \" \", new_evidence_text) new_evidence_text =", "import fill import numpy as np import pandas as pd import ebel.database from", "> 1: json_tree = bel_to_json(results['tree']) open(json_path, \"w\").write(json_tree) return json_path def _create_list_bel_files(bel_path: str) ->", "from typing import Iterable, Union, Optional from textwrap import fill import numpy as", "df.index += 1 if isinstance(reports, str): reports = reports.split(\",\") for report in reports:", "False, sqlalchemy_connection_str: str = None, json_file: bool = True, force_json: bool = False,):", "{bel_file}\") result = check_bel_script( bel_script_path=bel_file, force_new_db=force_new_db, bel_version=bel_version, ) if json_file: if not result['errors']", "once. 
Acceptable formats include: CSV, TSV, TXT, XLS, XLSX, JSON, HTML, MD bel_version:", "= bel_to_json(results['tree']) open(json_path, \"w\").write(json_tree) return json_path def _create_list_bel_files(bel_path: str) -> list: \"\"\"Export all", "validation_results[bel_file] = dict() logger.info(f\"Processing {bel_file}\") result = check_bel_script( bel_script_path=bel_file, force_new_db=force_new_db, bel_version=bel_version, ) if", "\"w\") as output_file: output_file.write(new_content) def _write_odb_json(bel_path: str, results: dict, bel_version: str) -> str:", "str], result: dict, report_type: str) -> list: \"\"\"Write report in different types depending", "e(BE:L) generated OrientDB database. Only creates the JSON file when there are no", "grammar. Parameters ---------- bel_script_path: str Path to BEL file or directory contaiing BEL", "report.split(\".bel.\")[0] + \".bel?expanded=true&viewer=simple#L%s)\" df.line_number = [url_template % (x, x) for x in df.line_number]", "of file paths to write reports to. Multiple formats of the report can", "Current available are 1.0, 2.0, and 2.1. Defaults to the most recent version.", "if result['errors']: logger.error(\"Tree can not be printed because errors still exists\\n\") else: logger.debug(result['tree'])", "ebel.database.set_connection(sqlalchemy_connection_str) bel_files = _create_list_bel_files(bel_path=bel_script_path) validation_results['bel_files_checked'] = bel_files for bel_file in bel_files: # Create", "report.endswith('.tsv'): df.to_csv(report, sep='\\t') if report.endswith('.json'): df.to_json(report) if report.endswith('.txt'): open(report, \"w\").write(df.to_string()) if report.endswith('.html'): df.to_html(report)", "bel_script_path.startswith('\"') and bel_script_path.endswith('\"'): bel_script_path = bel_script_path[1:-1] if reports and reports.startswith('\"') and reports.endswith('\"'): reports", "\"\"\"Export all BEL files in directory as list. 
If single file is passed,", "CSV, TSV, TXT, XLS, XLSX, JSON, HTML, MD bel_version: {'1', '2', '2_1'} Which", "up used namespaces and values. If None given, it uses the generated e(BE:L)", "to False. sqlalchemy_connection_str: str Path to SQLLite database to be used for storing/looking", "json_file: if not result['errors'] or force_json: json_file = _write_odb_json(bel_path=bel_file, results=result, bel_version=bel_version) validation_results[bel_file]['json'] =", "single file is passed, returns a list with that path.\"\"\" if os.path.isdir(bel_path): bel_files", "file name suffix in reports. Parameters ---------- reports : Iterable[str] or str List", "in directory as list. If single file is passed, returns a list with", "df.hint.dtype == np.str: df.hint = df.hint.str.replace(r'\\|', '&#124;') if df.entry.dtype == np.str: df.entry =", "uses the generated e(BE:L) database (default). json_file: bool If True, generates a JSON", ": str Path to the BEL file. new_file_path : str (optional) Export repaired", "generated at once. Acceptable formats include: CSV, TSV, TXT, XLS, XLSX, JSON, HTML,", "if line_by_line: # TODO: This is perhaps not working result = check_bel_script_line_by_line(bel_script_path, error_report_file_path=reports,", "new_file_path: Optional[str] = None): \"\"\"Repair a BEL document. Parameters ---------- bel_script_path : str", "or directory contaiing BEL files. force_new_db: bool Delete current database of namespaces/values and", "\"\"\"Validate BEL script for correct syntax following eBNF grammar. Parameters ---------- bel_script_path: str", "ebel validate my.bel -v 2 -r error_report.md,error_report.json \"\"\" validation_results = dict() if bel_script_path.startswith('\"')", "\"entry\", \"line_number\", \"column\", \"hint\"] df = pd.DataFrame(data=errors_or_warns_as_list_of_dicts, columns=columns) df.index += 1 if isinstance(reports,", "Union, Optional from textwrap import fill import numpy as np import pandas as", "a new one. Defaults to False. 
line_by_line: bool TODO: Write this. reports: Iterable[str]", "\"w\").write(json_tree) return json_path def _create_list_bel_files(bel_path: str) -> list: \"\"\"Export all BEL files in", "new_prefix = \"SET DOCUMENT Description = \" else: new_prefix = \"SET Support =", "creates the JSON file when there are no grammar or syntax errors. Defaults", "bool Delete current database of namespaces/values and generate a new one. Defaults to", "bel_script_path = bel_script_path[1:-1] if reports and reports.startswith('\"') and reports.endswith('\"'): reports = reports[1:-1] if", "script `my.bel` for BEL syntax 2.0, create error reports in Markdown and JSON", "else: logger.debug(result['tree']) validation_results[bel_file]['tree'] = result['tree'] if result['warnings'] and reports: report_paths = _write_report(reports, result,", "result: dict, report_type: str) -> list: \"\"\"Write report in different types depending on", "df.entry.str.replace(r'\\|', '&#124;') df.url = [(\"[url](\" + str(x) + \")\" if not pd.isna(x) else", "= True, force_json: bool = False,): \"\"\"Validate BEL script for correct syntax following", "Cytoscape: > ebel validate my.bel -v 2 -r error_report.md,error_report.json \"\"\" validation_results = dict()", "bool = False,): \"\"\"Validate BEL script for correct syntax following eBNF grammar. 
Parameters", "Path to SQLLite database to be used for storing/looking up used namespaces and", "file.\"\"\" import os import re import csv import difflib import logging from typing", "new_evidence_text = re.sub(r\"(\\\\?[\\r\\n]+)|\\\\ \", \" \", regex_pattern[3].strip()) new_evidence_text = re.sub(r\"\\s{2,}\", \" \", new_evidence_text)", "Acceptable formats include: CSV, TSV, TXT, XLS, XLSX, JSON, HTML, MD bel_version: {'1',", "\" new_evidence_text = re.sub(r\"(\\\\?[\\r\\n]+)|\\\\ \", \" \", regex_pattern[3].strip()) new_evidence_text = re.sub(r\"\\s{2,}\", \" \",", "df.to_excel(report, engine='xlsxwriter') if report.endswith('.tsv'): df.to_csv(report, sep='\\t') if report.endswith('.json'): df.to_json(report) if report.endswith('.txt'): open(report, \"w\").write(df.to_string())", "bool Generates a tree of relationships derived from the BEL file. Defaults to", "bel_path + \".json\" if int(bel_version[0]) > 1: json_tree = bel_to_json(results['tree']) open(json_path, \"w\").write(json_tree) return", "logger = logging.getLogger(__name__) def validate_bel_file(bel_script_path: str, force_new_db: bool = False, line_by_line: bool =", "in result]) + \"\\n\") else: if sqlalchemy_connection_str: ebel.database.set_connection(sqlalchemy_connection_str) bel_files = _create_list_bel_files(bel_path=bel_script_path) validation_results['bel_files_checked'] =", "a tree of relationships derived from the BEL file. Defaults to False. sqlalchemy_connection_str:", "json_tree = bel_to_json(results['tree']) open(json_path, \"w\").write(json_tree) return json_path def _create_list_bel_files(bel_path: str) -> list: \"\"\"Export", "separated list of report file names. result : dict return value of check_bel_script", "pd.isna(x) else '') for x in df.url] url_template = \"[%s](\" + report.split(\".bel.\")[0] +", "each BEL file processed. 
Examples -------- Task: Validate BEL script `my.bel` for BEL", "for the import of BEL network into Cytoscape: > ebel validate my.bel -v", "validate my.bel -v 2 -r error_report.md,error_report.json \"\"\" validation_results = dict() if bel_script_path.startswith('\"') and", "that can be used for importing BEL relationships into an e(BE:L) generated OrientDB" ]
[ "\"report\" INSPECT_CMD = \"inspect\" REMOVE_CMD = \"remove\" RETRIEVE_CMD = \"retrieve\" STATUS_CMD = \"status\"", "ENV_VARS = { \"namespace\": \"PIPESTAT_NAMESPACE\", \"config\": \"PIPESTAT_CONFIG\", \"results_file\": \"PIPESTAT_RESULTS_FILE\", \"schema\": \"PIPESTAT_RESULTS_SCHEMA\", \"status_schema\": \"PIPESTAT_SATUS_SCHEMA\",", "result.\", RETRIEVE_CMD: \"Retrieve a result.\", STATUS_CMD: \"Manage pipeline status.\", } STATUS_GET_CMD = \"get\"", "\"_highlighted\" DB_COLUMN_KEY = \"db_column\" DB_RELATIONSHIP_KEY = \"relationship\" DB_RELATIONSHIP_NAME_KEY = \"name\" DB_RELATIONSHIP_TABLE_KEY = \"table\"", "\"Get status.\", } DOC_URL = \"http://pipestat.databio.org/en/latest/db_config/\" # DB config keys CFG_DATABASE_KEY = \"database\"", "JSON, Boolean, Float, Integer, String PKG_NAME = \"pipestat\" LOCK_PREFIX = \"lock.\" REPORT_CMD =", "\"Inspect a database.\", REMOVE_CMD: \"Remove a result.\", RETRIEVE_CMD: \"Retrieve a result.\", STATUS_CMD: \"Manage", "\"remove\" RETRIEVE_CMD = \"retrieve\" STATUS_CMD = \"status\" SUBPARSER_MSGS = { REPORT_CMD: \"Report a", "DB column names ID = \"id\" RECORD_ID = \"record_identifier\" STATUS = \"status\" RESERVED_COLNAMES", "bool, } SQL_CLASSES_BY_TYPE = { \"number\": Float, \"integer\": Integer, \"object\": JSONB, \"image\": JSONB,", "= os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"pipestat_config_schema.yaml\" ) STATUS_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"status_schema.yaml\" )", "Float, \"integer\": Integer, \"object\": JSONB, \"image\": JSONB, \"file\": JSONB, \"string\": String(500), \"array\": JSONB,", "\"_database_only\" CONFIG_KEY = \"_config\" SCHEMA_KEY = \"_schema\" STATUS_KEY = \"_status\" STATUS_SCHEMA_KEY = \"_status_schema\"", "pipeline status.\", } STATUS_GET_CMD = \"get\" STATUS_SET_CMD = \"set\" STATUS_SUBPARSER_MESSAGES = { STATUS_SET_CMD:", "names DB_ONLY_KEY = \"_database_only\" CONFIG_KEY = \"_config\" SCHEMA_KEY = \"_schema\" 
STATUS_KEY = \"_status\"", "STATUS_CMD: \"Manage pipeline status.\", } STATUS_GET_CMD = \"get\" STATUS_SET_CMD = \"set\" STATUS_SUBPARSER_MESSAGES =", "INSPECT_CMD: \"Inspect a database.\", REMOVE_CMD: \"Remove a result.\", RETRIEVE_CMD: \"Retrieve a result.\", STATUS_CMD:", "\"string\"}, \"thumbnail_path\": {\"type\": \"string\"}, \"title\": {\"type\": \"string\"}, }, \"required\": [\"path\", \"thumbnail_path\", \"title\"], },", "CFG_PASSWORD_KEY, CFG_USER_KEY, CFG_NAME_KEY, CFG_DIALECT_KEY, CFG_DRIVER_KEY, ] # object attribute names DB_ONLY_KEY = \"_database_only\"", "= \"http://pipestat.databio.org/en/latest/db_config/\" # DB config keys CFG_DATABASE_KEY = \"database\" CFG_NAME_KEY = \"name\" CFG_HOST_KEY", "\"PIPESTAT_NAMESPACE\", \"config\": \"PIPESTAT_CONFIG\", \"results_file\": \"PIPESTAT_RESULTS_FILE\", \"schema\": \"PIPESTAT_RESULTS_SCHEMA\", \"status_schema\": \"PIPESTAT_SATUS_SCHEMA\", \"record_identifier\": \"PIPESTAT_RECORD_ID\", } CLASSES_BY_TYPE", "\"_file\" RECORD_ID_KEY = \"_record_id\" DB_SESSION_KEY = \"_db_session\" DB_SCOPED_SESSION_KEY = \"_db_scoped_session\" DB_ENGINE_KEY = \"_db_engine\"", "DB config keys CFG_DATABASE_KEY = \"database\" CFG_NAME_KEY = \"name\" CFG_HOST_KEY = \"host\" CFG_PORT_KEY", "JSONB, \"string\": String(500), \"array\": JSONB, \"boolean\": Boolean, } CFG_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\",", "RECORD_ID = \"record_identifier\" STATUS = \"status\" RESERVED_COLNAMES = [ID, RECORD_ID] CANONICAL_TYPES = {", "= \"name\" CFG_HOST_KEY = \"host\" CFG_PORT_KEY = \"port\" CFG_PASSWORD_KEY = \"password\" CFG_USER_KEY =", "\"_declarative_base\" DB_ORMS_KEY = \"_orms\" DATA_KEY = \"_data\" NAME_KEY = \"_name\" FILE_KEY = \"_file\"", "os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"pipestat_config_schema.yaml\" ) STATUS_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"status_schema.yaml\" ) STATUS_TABLE_SCHEMA", "\"Retrieve a 
result.\", STATUS_CMD: \"Manage pipeline status.\", } STATUS_GET_CMD = \"get\" STATUS_SET_CMD =", "= \"name\" DB_RELATIONSHIP_TABLE_KEY = \"table\" DB_RELATIONSHIP_COL_KEY = \"column\" DB_RELATIONSHIP_BACKREF_KEY = \"backref\" DB_RELATIONSHIP_ELEMENTS =", "keys SCHEMA_PROP_KEY = \"properties\" SCHEMA_TYPE_KEY = \"type\" SCHEMA_DESC_KEY = \"description\" # DB column", "CFG_DATABASE_KEY = \"database\" CFG_NAME_KEY = \"name\" CFG_HOST_KEY = \"host\" CFG_PORT_KEY = \"port\" CFG_PASSWORD_KEY", "SUBPARSER_MSGS = { REPORT_CMD: \"Report a result.\", INSPECT_CMD: \"Inspect a database.\", REMOVE_CMD: \"Remove", "INSPECT_CMD = \"inspect\" REMOVE_CMD = \"remove\" RETRIEVE_CMD = \"retrieve\" STATUS_CMD = \"status\" SUBPARSER_MSGS", "os from sqlalchemy.dialects.postgresql.json import JSONB from sqlalchemy.types import ARRAY, JSON, Boolean, Float, Integer,", "{ \"namespace\": \"PIPESTAT_NAMESPACE\", \"config\": \"PIPESTAT_CONFIG\", \"results_file\": \"PIPESTAT_RESULTS_FILE\", \"schema\": \"PIPESTAT_RESULTS_SCHEMA\", \"status_schema\": \"PIPESTAT_SATUS_SCHEMA\", \"record_identifier\": \"PIPESTAT_RECORD_ID\",", "\"description\" # DB column names ID = \"id\" RECORD_ID = \"record_identifier\" STATUS =", "JSONB, \"file\": JSONB, \"string\": String(500), \"array\": JSONB, \"boolean\": Boolean, } CFG_SCHEMA = os.path.join(", "{\"type\": \"string\"}, }, \"required\": [\"path\", \"thumbnail_path\", \"title\"], }, \"file\": { \"type\": \"object\", \"properties\":", "STATUS_GET_CMD = \"get\" STATUS_SET_CMD = \"set\" STATUS_SUBPARSER_MESSAGES = { STATUS_SET_CMD: \"Set status.\", STATUS_GET_CMD:", "= \"id\" RECORD_ID = \"record_identifier\" STATUS = \"status\" RESERVED_COLNAMES = [ID, RECORD_ID] CANONICAL_TYPES", "} STATUS_GET_CMD = \"get\" STATUS_SET_CMD = \"set\" STATUS_SUBPARSER_MESSAGES = { STATUS_SET_CMD: \"Set status.\",", "\"_data\" NAME_KEY = \"_name\" FILE_KEY = \"_file\" RECORD_ID_KEY = \"_record_id\" DB_SESSION_KEY = \"_db_session\"", "] # schema keys SCHEMA_PROP_KEY = 
\"properties\" SCHEMA_TYPE_KEY = \"type\" SCHEMA_DESC_KEY = \"description\"", "\"string\": String(500), \"array\": JSONB, \"boolean\": Boolean, } CFG_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"pipestat_config_schema.yaml\"", "keys CFG_DATABASE_KEY = \"database\" CFG_NAME_KEY = \"name\" CFG_HOST_KEY = \"host\" CFG_PORT_KEY = \"port\"", "\"title\"], }, } ENV_VARS = { \"namespace\": \"PIPESTAT_NAMESPACE\", \"config\": \"PIPESTAT_CONFIG\", \"results_file\": \"PIPESTAT_RESULTS_FILE\", \"schema\":", "String PKG_NAME = \"pipestat\" LOCK_PREFIX = \"lock.\" REPORT_CMD = \"report\" INSPECT_CMD = \"inspect\"", "DB_RELATIONSHIP_TABLE_KEY, ] # schema keys SCHEMA_PROP_KEY = \"properties\" SCHEMA_TYPE_KEY = \"type\" SCHEMA_DESC_KEY =", "\"_config\" SCHEMA_KEY = \"_schema\" STATUS_KEY = \"_status\" STATUS_SCHEMA_KEY = \"_status_schema\" STATUS_SCHEMA_SOURCE_KEY = \"_status_schema_source\"", "= { \"number\": float, \"integer\": int, \"object\": dict, \"image\": dict, \"file\": dict, \"string\":", "status.\", } STATUS_GET_CMD = \"get\" STATUS_SET_CMD = \"set\" STATUS_SUBPARSER_MESSAGES = { STATUS_SET_CMD: \"Set", "\"object\", \"properties\": { \"path\": {\"type\": \"string\"}, \"thumbnail_path\": {\"type\": \"string\"}, \"title\": {\"type\": \"string\"}, },", "\"column\" DB_RELATIONSHIP_BACKREF_KEY = \"backref\" DB_RELATIONSHIP_ELEMENTS = [ DB_RELATIONSHIP_BACKREF_KEY, DB_RELATIONSHIP_COL_KEY, DB_RELATIONSHIP_NAME_KEY, DB_RELATIONSHIP_TABLE_KEY, ] #", "RETRIEVE_CMD = \"retrieve\" STATUS_CMD = \"status\" SUBPARSER_MSGS = { REPORT_CMD: \"Report a result.\",", "CLASSES_BY_TYPE = { \"number\": float, \"integer\": int, \"object\": dict, \"image\": dict, \"file\": dict,", "Integer, String PKG_NAME = \"pipestat\" LOCK_PREFIX = \"lock.\" REPORT_CMD = \"report\" INSPECT_CMD =", "\"table\" DB_RELATIONSHIP_COL_KEY = \"column\" DB_RELATIONSHIP_BACKREF_KEY = \"backref\" DB_RELATIONSHIP_ELEMENTS = [ DB_RELATIONSHIP_BACKREF_KEY, DB_RELATIONSHIP_COL_KEY, 
DB_RELATIONSHIP_NAME_KEY,", "sqlalchemy.dialects.postgresql.json import JSONB from sqlalchemy.types import ARRAY, JSON, Boolean, Float, Integer, String PKG_NAME", "STATUS_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"status_schema.yaml\" ) STATUS_TABLE_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"status_table_schema.yaml\"", "CFG_USER_KEY = \"user\" CFG_DIALECT_KEY = \"dialect\" # sqlite, mysql, postgresql, oracle, or mssql", "\"http://pipestat.databio.org/en/latest/db_config/\" # DB config keys CFG_DATABASE_KEY = \"database\" CFG_NAME_KEY = \"name\" CFG_HOST_KEY =", "HIGHLIGHTED_KEY = \"_highlighted\" DB_COLUMN_KEY = \"db_column\" DB_RELATIONSHIP_KEY = \"relationship\" DB_RELATIONSHIP_NAME_KEY = \"name\" DB_RELATIONSHIP_TABLE_KEY", "= \"_record_id\" DB_SESSION_KEY = \"_db_session\" DB_SCOPED_SESSION_KEY = \"_db_scoped_session\" DB_ENGINE_KEY = \"_db_engine\" HIGHLIGHTED_KEY =", "= \"status\" RESERVED_COLNAMES = [ID, RECORD_ID] CANONICAL_TYPES = { \"image\": { \"type\": \"object\",", "\"database\" CFG_NAME_KEY = \"name\" CFG_HOST_KEY = \"host\" CFG_PORT_KEY = \"port\" CFG_PASSWORD_KEY = \"password\"", "status.\", } DOC_URL = \"http://pipestat.databio.org/en/latest/db_config/\" # DB config keys CFG_DATABASE_KEY = \"database\" CFG_NAME_KEY", "# object attribute names DB_ONLY_KEY = \"_database_only\" CONFIG_KEY = \"_config\" SCHEMA_KEY = \"_schema\"", "= \"port\" CFG_PASSWORD_KEY = \"password\" CFG_USER_KEY = \"user\" CFG_DIALECT_KEY = \"dialect\" # sqlite,", "CFG_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"pipestat_config_schema.yaml\" ) STATUS_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"status_schema.yaml\"", "SCHEMA_TYPE_KEY = \"type\" SCHEMA_DESC_KEY = \"description\" # DB column names ID = \"id\"", "\"user\" CFG_DIALECT_KEY = \"dialect\" # sqlite, mysql, postgresql, oracle, or mssql CFG_DRIVER_KEY =", "\"_orms\" 
DATA_KEY = \"_data\" NAME_KEY = \"_name\" FILE_KEY = \"_file\" RECORD_ID_KEY = \"_record_id\"", "LOCK_PREFIX = \"lock.\" REPORT_CMD = \"report\" INSPECT_CMD = \"inspect\" REMOVE_CMD = \"remove\" RETRIEVE_CMD", "= \"type\" SCHEMA_DESC_KEY = \"description\" # DB column names ID = \"id\" RECORD_ID", "\"object\", \"properties\": { \"path\": {\"type\": \"string\"}, \"title\": {\"type\": \"string\"}, }, \"required\": [\"path\", \"title\"],", "{ \"type\": \"object\", \"properties\": { \"path\": {\"type\": \"string\"}, \"thumbnail_path\": {\"type\": \"string\"}, \"title\": {\"type\":", "CFG_PORT_KEY = \"port\" CFG_PASSWORD_KEY = \"password\" CFG_USER_KEY = \"user\" CFG_DIALECT_KEY = \"dialect\" #", "sqlite, mysql, postgresql, oracle, or mssql CFG_DRIVER_KEY = \"driver\" DB_CREDENTIALS = [ CFG_HOST_KEY,", "NAME_KEY = \"_name\" FILE_KEY = \"_file\" RECORD_ID_KEY = \"_record_id\" DB_SESSION_KEY = \"_db_session\" DB_SCOPED_SESSION_KEY", "\"PIPESTAT_CONFIG\", \"results_file\": \"PIPESTAT_RESULTS_FILE\", \"schema\": \"PIPESTAT_RESULTS_SCHEMA\", \"status_schema\": \"PIPESTAT_SATUS_SCHEMA\", \"record_identifier\": \"PIPESTAT_RECORD_ID\", } CLASSES_BY_TYPE = {", "\"set\" STATUS_SUBPARSER_MESSAGES = { STATUS_SET_CMD: \"Set status.\", STATUS_GET_CMD: \"Get status.\", } DOC_URL =", "= \"_highlighted\" DB_COLUMN_KEY = \"db_column\" DB_RELATIONSHIP_KEY = \"relationship\" DB_RELATIONSHIP_NAME_KEY = \"name\" DB_RELATIONSHIP_TABLE_KEY =", "\"array\": list, \"boolean\": bool, } SQL_CLASSES_BY_TYPE = { \"number\": Float, \"integer\": Integer, \"object\":", "= \"_status\" STATUS_SCHEMA_KEY = \"_status_schema\" STATUS_SCHEMA_SOURCE_KEY = \"_status_schema_source\" STATUS_FILE_DIR = \"_status_file_dir\" RES_SCHEMAS_KEY =", "import JSONB from sqlalchemy.types import ARRAY, JSON, Boolean, Float, Integer, String PKG_NAME =", "= \"table\" DB_RELATIONSHIP_COL_KEY = \"column\" DB_RELATIONSHIP_BACKREF_KEY = \"backref\" DB_RELATIONSHIP_ELEMENTS = [ DB_RELATIONSHIP_BACKREF_KEY, DB_RELATIONSHIP_COL_KEY,", 
"\"file\": { \"type\": \"object\", \"properties\": { \"path\": {\"type\": \"string\"}, \"title\": {\"type\": \"string\"}, },", "import ARRAY, JSON, Boolean, Float, Integer, String PKG_NAME = \"pipestat\" LOCK_PREFIX = \"lock.\"", "RECORD_ID_KEY = \"_record_id\" DB_SESSION_KEY = \"_db_session\" DB_SCOPED_SESSION_KEY = \"_db_scoped_session\" DB_ENGINE_KEY = \"_db_engine\" HIGHLIGHTED_KEY", "REMOVE_CMD: \"Remove a result.\", RETRIEVE_CMD: \"Retrieve a result.\", STATUS_CMD: \"Manage pipeline status.\", }", "\"image\": { \"type\": \"object\", \"properties\": { \"path\": {\"type\": \"string\"}, \"thumbnail_path\": {\"type\": \"string\"}, \"title\":", "DB_RELATIONSHIP_TABLE_KEY = \"table\" DB_RELATIONSHIP_COL_KEY = \"column\" DB_RELATIONSHIP_BACKREF_KEY = \"backref\" DB_RELATIONSHIP_ELEMENTS = [ DB_RELATIONSHIP_BACKREF_KEY,", "CFG_DIALECT_KEY = \"dialect\" # sqlite, mysql, postgresql, oracle, or mssql CFG_DRIVER_KEY = \"driver\"", "FILE_KEY = \"_file\" RECORD_ID_KEY = \"_record_id\" DB_SESSION_KEY = \"_db_session\" DB_SCOPED_SESSION_KEY = \"_db_scoped_session\" DB_ENGINE_KEY", "= \"relationship\" DB_RELATIONSHIP_NAME_KEY = \"name\" DB_RELATIONSHIP_TABLE_KEY = \"table\" DB_RELATIONSHIP_COL_KEY = \"column\" DB_RELATIONSHIP_BACKREF_KEY =", "= { STATUS_SET_CMD: \"Set status.\", STATUS_GET_CMD: \"Get status.\", } DOC_URL = \"http://pipestat.databio.org/en/latest/db_config/\" #", "\"host\" CFG_PORT_KEY = \"port\" CFG_PASSWORD_KEY = \"password\" CFG_USER_KEY = \"user\" CFG_DIALECT_KEY = \"dialect\"", "CFG_HOST_KEY, CFG_PORT_KEY, CFG_PASSWORD_KEY, CFG_USER_KEY, CFG_NAME_KEY, CFG_DIALECT_KEY, CFG_DRIVER_KEY, ] # object attribute names DB_ONLY_KEY", "= \"user\" CFG_DIALECT_KEY = \"dialect\" # sqlite, mysql, postgresql, oracle, or mssql CFG_DRIVER_KEY", "\"PIPESTAT_RECORD_ID\", } CLASSES_BY_TYPE = { \"number\": float, \"integer\": int, \"object\": dict, \"image\": dict,", "schema keys SCHEMA_PROP_KEY = \"properties\" SCHEMA_TYPE_KEY = \"type\" SCHEMA_DESC_KEY = \"description\" # 
DB", "DB_RELATIONSHIP_BACKREF_KEY = \"backref\" DB_RELATIONSHIP_ELEMENTS = [ DB_RELATIONSHIP_BACKREF_KEY, DB_RELATIONSHIP_COL_KEY, DB_RELATIONSHIP_NAME_KEY, DB_RELATIONSHIP_TABLE_KEY, ] # schema", "DB_RELATIONSHIP_NAME_KEY, DB_RELATIONSHIP_TABLE_KEY, ] # schema keys SCHEMA_PROP_KEY = \"properties\" SCHEMA_TYPE_KEY = \"type\" SCHEMA_DESC_KEY", "STATUS_SCHEMA_SOURCE_KEY = \"_status_schema_source\" STATUS_FILE_DIR = \"_status_file_dir\" RES_SCHEMAS_KEY = \"_result_schemas\" DB_BASE_KEY = \"_declarative_base\" DB_ORMS_KEY", "\"_status_file_dir\" RES_SCHEMAS_KEY = \"_result_schemas\" DB_BASE_KEY = \"_declarative_base\" DB_ORMS_KEY = \"_orms\" DATA_KEY = \"_data\"", "import os from sqlalchemy.dialects.postgresql.json import JSONB from sqlalchemy.types import ARRAY, JSON, Boolean, Float,", "SQL_CLASSES_BY_TYPE = { \"number\": Float, \"integer\": Integer, \"object\": JSONB, \"image\": JSONB, \"file\": JSONB,", "# schema keys SCHEMA_PROP_KEY = \"properties\" SCHEMA_TYPE_KEY = \"type\" SCHEMA_DESC_KEY = \"description\" #", "= \"_db_scoped_session\" DB_ENGINE_KEY = \"_db_engine\" HIGHLIGHTED_KEY = \"_highlighted\" DB_COLUMN_KEY = \"db_column\" DB_RELATIONSHIP_KEY =", "\"backref\" DB_RELATIONSHIP_ELEMENTS = [ DB_RELATIONSHIP_BACKREF_KEY, DB_RELATIONSHIP_COL_KEY, DB_RELATIONSHIP_NAME_KEY, DB_RELATIONSHIP_TABLE_KEY, ] # schema keys SCHEMA_PROP_KEY", "DB_ENGINE_KEY = \"_db_engine\" HIGHLIGHTED_KEY = \"_highlighted\" DB_COLUMN_KEY = \"db_column\" DB_RELATIONSHIP_KEY = \"relationship\" DB_RELATIONSHIP_NAME_KEY", "status.\", STATUS_GET_CMD: \"Get status.\", } DOC_URL = \"http://pipestat.databio.org/en/latest/db_config/\" # DB config keys CFG_DATABASE_KEY", "STATUS_FILE_DIR = \"_status_file_dir\" RES_SCHEMAS_KEY = \"_result_schemas\" DB_BASE_KEY = \"_declarative_base\" DB_ORMS_KEY = \"_orms\" DATA_KEY", "# DB config keys CFG_DATABASE_KEY = \"database\" CFG_NAME_KEY = \"name\" CFG_HOST_KEY = \"host\"", "= \"host\" CFG_PORT_KEY = \"port\" CFG_PASSWORD_KEY = \"password\" CFG_USER_KEY = 
\"user\" CFG_DIALECT_KEY =", ") STATUS_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"status_schema.yaml\" ) STATUS_TABLE_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\",", "\"inspect\" REMOVE_CMD = \"remove\" RETRIEVE_CMD = \"retrieve\" STATUS_CMD = \"status\" SUBPARSER_MSGS = {", "\"title\": {\"type\": \"string\"}, }, \"required\": [\"path\", \"title\"], }, } ENV_VARS = { \"namespace\":", "DB_RELATIONSHIP_KEY = \"relationship\" DB_RELATIONSHIP_NAME_KEY = \"name\" DB_RELATIONSHIP_TABLE_KEY = \"table\" DB_RELATIONSHIP_COL_KEY = \"column\" DB_RELATIONSHIP_BACKREF_KEY", "\"image\": dict, \"file\": dict, \"string\": str, \"array\": list, \"boolean\": bool, } SQL_CLASSES_BY_TYPE =", "= \"_db_session\" DB_SCOPED_SESSION_KEY = \"_db_scoped_session\" DB_ENGINE_KEY = \"_db_engine\" HIGHLIGHTED_KEY = \"_highlighted\" DB_COLUMN_KEY =", "mssql CFG_DRIVER_KEY = \"driver\" DB_CREDENTIALS = [ CFG_HOST_KEY, CFG_PORT_KEY, CFG_PASSWORD_KEY, CFG_USER_KEY, CFG_NAME_KEY, CFG_DIALECT_KEY,", "= { \"image\": { \"type\": \"object\", \"properties\": { \"path\": {\"type\": \"string\"}, \"thumbnail_path\": {\"type\":", "= { \"number\": Float, \"integer\": Integer, \"object\": JSONB, \"image\": JSONB, \"file\": JSONB, \"string\":", "= { \"namespace\": \"PIPESTAT_NAMESPACE\", \"config\": \"PIPESTAT_CONFIG\", \"results_file\": \"PIPESTAT_RESULTS_FILE\", \"schema\": \"PIPESTAT_RESULTS_SCHEMA\", \"status_schema\": \"PIPESTAT_SATUS_SCHEMA\", \"record_identifier\":", "result.\", STATUS_CMD: \"Manage pipeline status.\", } STATUS_GET_CMD = \"get\" STATUS_SET_CMD = \"set\" STATUS_SUBPARSER_MESSAGES", "a result.\", INSPECT_CMD: \"Inspect a database.\", REMOVE_CMD: \"Remove a result.\", RETRIEVE_CMD: \"Retrieve a", "\"schemas\", \"pipestat_config_schema.yaml\" ) STATUS_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"status_schema.yaml\" ) STATUS_TABLE_SCHEMA = os.path.join(", "DB_CREDENTIALS = [ 
CFG_HOST_KEY, CFG_PORT_KEY, CFG_PASSWORD_KEY, CFG_USER_KEY, CFG_NAME_KEY, CFG_DIALECT_KEY, CFG_DRIVER_KEY, ] # object", "DB_ORMS_KEY = \"_orms\" DATA_KEY = \"_data\" NAME_KEY = \"_name\" FILE_KEY = \"_file\" RECORD_ID_KEY", "\"title\": {\"type\": \"string\"}, }, \"required\": [\"path\", \"thumbnail_path\", \"title\"], }, \"file\": { \"type\": \"object\",", "CFG_DRIVER_KEY, ] # object attribute names DB_ONLY_KEY = \"_database_only\" CONFIG_KEY = \"_config\" SCHEMA_KEY", "[\"path\", \"title\"], }, } ENV_VARS = { \"namespace\": \"PIPESTAT_NAMESPACE\", \"config\": \"PIPESTAT_CONFIG\", \"results_file\": \"PIPESTAT_RESULTS_FILE\",", "or mssql CFG_DRIVER_KEY = \"driver\" DB_CREDENTIALS = [ CFG_HOST_KEY, CFG_PORT_KEY, CFG_PASSWORD_KEY, CFG_USER_KEY, CFG_NAME_KEY,", "\"_record_id\" DB_SESSION_KEY = \"_db_session\" DB_SCOPED_SESSION_KEY = \"_db_scoped_session\" DB_ENGINE_KEY = \"_db_engine\" HIGHLIGHTED_KEY = \"_highlighted\"", "int, \"object\": dict, \"image\": dict, \"file\": dict, \"string\": str, \"array\": list, \"boolean\": bool,", "\"string\"}, \"title\": {\"type\": \"string\"}, }, \"required\": [\"path\", \"title\"], }, } ENV_VARS = {", "\"_db_session\" DB_SCOPED_SESSION_KEY = \"_db_scoped_session\" DB_ENGINE_KEY = \"_db_engine\" HIGHLIGHTED_KEY = \"_highlighted\" DB_COLUMN_KEY = \"db_column\"", "DB_SCOPED_SESSION_KEY = \"_db_scoped_session\" DB_ENGINE_KEY = \"_db_engine\" HIGHLIGHTED_KEY = \"_highlighted\" DB_COLUMN_KEY = \"db_column\" DB_RELATIONSHIP_KEY", "dict, \"image\": dict, \"file\": dict, \"string\": str, \"array\": list, \"boolean\": bool, } SQL_CLASSES_BY_TYPE", "ID = \"id\" RECORD_ID = \"record_identifier\" STATUS = \"status\" RESERVED_COLNAMES = [ID, RECORD_ID]", "= \"retrieve\" STATUS_CMD = \"status\" SUBPARSER_MSGS = { REPORT_CMD: \"Report a result.\", INSPECT_CMD:", "\"name\" DB_RELATIONSHIP_TABLE_KEY = \"table\" DB_RELATIONSHIP_COL_KEY = \"column\" DB_RELATIONSHIP_BACKREF_KEY = \"backref\" DB_RELATIONSHIP_ELEMENTS = [", "{\"type\": \"string\"}, 
\"thumbnail_path\": {\"type\": \"string\"}, \"title\": {\"type\": \"string\"}, }, \"required\": [\"path\", \"thumbnail_path\", \"title\"],", "= \"backref\" DB_RELATIONSHIP_ELEMENTS = [ DB_RELATIONSHIP_BACKREF_KEY, DB_RELATIONSHIP_COL_KEY, DB_RELATIONSHIP_NAME_KEY, DB_RELATIONSHIP_TABLE_KEY, ] # schema keys", "CANONICAL_TYPES = { \"image\": { \"type\": \"object\", \"properties\": { \"path\": {\"type\": \"string\"}, \"thumbnail_path\":", "DB_RELATIONSHIP_BACKREF_KEY, DB_RELATIONSHIP_COL_KEY, DB_RELATIONSHIP_NAME_KEY, DB_RELATIONSHIP_TABLE_KEY, ] # schema keys SCHEMA_PROP_KEY = \"properties\" SCHEMA_TYPE_KEY =", "\"properties\": { \"path\": {\"type\": \"string\"}, \"title\": {\"type\": \"string\"}, }, \"required\": [\"path\", \"title\"], },", "\"Set status.\", STATUS_GET_CMD: \"Get status.\", } DOC_URL = \"http://pipestat.databio.org/en/latest/db_config/\" # DB config keys", "= \"pipestat\" LOCK_PREFIX = \"lock.\" REPORT_CMD = \"report\" INSPECT_CMD = \"inspect\" REMOVE_CMD =", "SCHEMA_KEY = \"_schema\" STATUS_KEY = \"_status\" STATUS_SCHEMA_KEY = \"_status_schema\" STATUS_SCHEMA_SOURCE_KEY = \"_status_schema_source\" STATUS_FILE_DIR", "= \"properties\" SCHEMA_TYPE_KEY = \"type\" SCHEMA_DESC_KEY = \"description\" # DB column names ID", "\"string\"}, }, \"required\": [\"path\", \"title\"], }, } ENV_VARS = { \"namespace\": \"PIPESTAT_NAMESPACE\", \"config\":", "[ CFG_HOST_KEY, CFG_PORT_KEY, CFG_PASSWORD_KEY, CFG_USER_KEY, CFG_NAME_KEY, CFG_DIALECT_KEY, CFG_DRIVER_KEY, ] # object attribute names", "\"_schema\" STATUS_KEY = \"_status\" STATUS_SCHEMA_KEY = \"_status_schema\" STATUS_SCHEMA_SOURCE_KEY = \"_status_schema_source\" STATUS_FILE_DIR = \"_status_file_dir\"", "\"_status\" STATUS_SCHEMA_KEY = \"_status_schema\" STATUS_SCHEMA_SOURCE_KEY = \"_status_schema_source\" STATUS_FILE_DIR = \"_status_file_dir\" RES_SCHEMAS_KEY = \"_result_schemas\"", "\"_db_scoped_session\" DB_ENGINE_KEY = \"_db_engine\" HIGHLIGHTED_KEY = \"_highlighted\" DB_COLUMN_KEY = \"db_column\" 
DB_RELATIONSHIP_KEY = \"relationship\"", "\"integer\": int, \"object\": dict, \"image\": dict, \"file\": dict, \"string\": str, \"array\": list, \"boolean\":", "oracle, or mssql CFG_DRIVER_KEY = \"driver\" DB_CREDENTIALS = [ CFG_HOST_KEY, CFG_PORT_KEY, CFG_PASSWORD_KEY, CFG_USER_KEY,", "= \"_status_file_dir\" RES_SCHEMAS_KEY = \"_result_schemas\" DB_BASE_KEY = \"_declarative_base\" DB_ORMS_KEY = \"_orms\" DATA_KEY =", "{ \"path\": {\"type\": \"string\"}, \"thumbnail_path\": {\"type\": \"string\"}, \"title\": {\"type\": \"string\"}, }, \"required\": [\"path\",", "[\"path\", \"thumbnail_path\", \"title\"], }, \"file\": { \"type\": \"object\", \"properties\": { \"path\": {\"type\": \"string\"},", "= [ID, RECORD_ID] CANONICAL_TYPES = { \"image\": { \"type\": \"object\", \"properties\": { \"path\":", "RETRIEVE_CMD: \"Retrieve a result.\", STATUS_CMD: \"Manage pipeline status.\", } STATUS_GET_CMD = \"get\" STATUS_SET_CMD", "\"db_column\" DB_RELATIONSHIP_KEY = \"relationship\" DB_RELATIONSHIP_NAME_KEY = \"name\" DB_RELATIONSHIP_TABLE_KEY = \"table\" DB_RELATIONSHIP_COL_KEY = \"column\"", "\"file\": JSONB, \"string\": String(500), \"array\": JSONB, \"boolean\": Boolean, } CFG_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)),", "STATUS_GET_CMD: \"Get status.\", } DOC_URL = \"http://pipestat.databio.org/en/latest/db_config/\" # DB config keys CFG_DATABASE_KEY =", "\"password\" CFG_USER_KEY = \"user\" CFG_DIALECT_KEY = \"dialect\" # sqlite, mysql, postgresql, oracle, or", "\"Manage pipeline status.\", } STATUS_GET_CMD = \"get\" STATUS_SET_CMD = \"set\" STATUS_SUBPARSER_MESSAGES = {", "float, \"integer\": int, \"object\": dict, \"image\": dict, \"file\": dict, \"string\": str, \"array\": list,", "= \"_declarative_base\" DB_ORMS_KEY = \"_orms\" DATA_KEY = \"_data\" NAME_KEY = \"_name\" FILE_KEY =", "= \"_status_schema_source\" STATUS_FILE_DIR = \"_status_file_dir\" RES_SCHEMAS_KEY = \"_result_schemas\" DB_BASE_KEY = \"_declarative_base\" DB_ORMS_KEY =", 
"DATA_KEY = \"_data\" NAME_KEY = \"_name\" FILE_KEY = \"_file\" RECORD_ID_KEY = \"_record_id\" DB_SESSION_KEY", "names ID = \"id\" RECORD_ID = \"record_identifier\" STATUS = \"status\" RESERVED_COLNAMES = [ID,", "= \"_data\" NAME_KEY = \"_name\" FILE_KEY = \"_file\" RECORD_ID_KEY = \"_record_id\" DB_SESSION_KEY =", "\"status\" RESERVED_COLNAMES = [ID, RECORD_ID] CANONICAL_TYPES = { \"image\": { \"type\": \"object\", \"properties\":", "sqlalchemy.types import ARRAY, JSON, Boolean, Float, Integer, String PKG_NAME = \"pipestat\" LOCK_PREFIX =", "from sqlalchemy.types import ARRAY, JSON, Boolean, Float, Integer, String PKG_NAME = \"pipestat\" LOCK_PREFIX", "= \"get\" STATUS_SET_CMD = \"set\" STATUS_SUBPARSER_MESSAGES = { STATUS_SET_CMD: \"Set status.\", STATUS_GET_CMD: \"Get", "\"number\": float, \"integer\": int, \"object\": dict, \"image\": dict, \"file\": dict, \"string\": str, \"array\":", "}, \"required\": [\"path\", \"title\"], }, } ENV_VARS = { \"namespace\": \"PIPESTAT_NAMESPACE\", \"config\": \"PIPESTAT_CONFIG\",", "SCHEMA_DESC_KEY = \"description\" # DB column names ID = \"id\" RECORD_ID = \"record_identifier\"", "\"required\": [\"path\", \"title\"], }, } ENV_VARS = { \"namespace\": \"PIPESTAT_NAMESPACE\", \"config\": \"PIPESTAT_CONFIG\", \"results_file\":", "database.\", REMOVE_CMD: \"Remove a result.\", RETRIEVE_CMD: \"Retrieve a result.\", STATUS_CMD: \"Manage pipeline status.\",", "= os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"status_schema.yaml\" ) STATUS_TABLE_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"status_table_schema.yaml\" )", "\"status\" SUBPARSER_MSGS = { REPORT_CMD: \"Report a result.\", INSPECT_CMD: \"Inspect a database.\", REMOVE_CMD:", "\"object\": JSONB, \"image\": JSONB, \"file\": JSONB, \"string\": String(500), \"array\": JSONB, \"boolean\": Boolean, }", "\"dialect\" # sqlite, mysql, postgresql, oracle, or mssql CFG_DRIVER_KEY = \"driver\" DB_CREDENTIALS =", "PKG_NAME = 
\"pipestat\" LOCK_PREFIX = \"lock.\" REPORT_CMD = \"report\" INSPECT_CMD = \"inspect\" REMOVE_CMD", "\"relationship\" DB_RELATIONSHIP_NAME_KEY = \"name\" DB_RELATIONSHIP_TABLE_KEY = \"table\" DB_RELATIONSHIP_COL_KEY = \"column\" DB_RELATIONSHIP_BACKREF_KEY = \"backref\"", "{ \"number\": Float, \"integer\": Integer, \"object\": JSONB, \"image\": JSONB, \"file\": JSONB, \"string\": String(500),", "= \"database\" CFG_NAME_KEY = \"name\" CFG_HOST_KEY = \"host\" CFG_PORT_KEY = \"port\" CFG_PASSWORD_KEY =", "\"id\" RECORD_ID = \"record_identifier\" STATUS = \"status\" RESERVED_COLNAMES = [ID, RECORD_ID] CANONICAL_TYPES =", "\"path\": {\"type\": \"string\"}, \"thumbnail_path\": {\"type\": \"string\"}, \"title\": {\"type\": \"string\"}, }, \"required\": [\"path\", \"thumbnail_path\",", "a result.\", RETRIEVE_CMD: \"Retrieve a result.\", STATUS_CMD: \"Manage pipeline status.\", } STATUS_GET_CMD =", "= \"_name\" FILE_KEY = \"_file\" RECORD_ID_KEY = \"_record_id\" DB_SESSION_KEY = \"_db_session\" DB_SCOPED_SESSION_KEY =", "\"type\" SCHEMA_DESC_KEY = \"description\" # DB column names ID = \"id\" RECORD_ID =", "= \"_db_engine\" HIGHLIGHTED_KEY = \"_highlighted\" DB_COLUMN_KEY = \"db_column\" DB_RELATIONSHIP_KEY = \"relationship\" DB_RELATIONSHIP_NAME_KEY =", "}, \"required\": [\"path\", \"thumbnail_path\", \"title\"], }, \"file\": { \"type\": \"object\", \"properties\": { \"path\":", "= \"_file\" RECORD_ID_KEY = \"_record_id\" DB_SESSION_KEY = \"_db_session\" DB_SCOPED_SESSION_KEY = \"_db_scoped_session\" DB_ENGINE_KEY =", "column names ID = \"id\" RECORD_ID = \"record_identifier\" STATUS = \"status\" RESERVED_COLNAMES =", "\"type\": \"object\", \"properties\": { \"path\": {\"type\": \"string\"}, \"thumbnail_path\": {\"type\": \"string\"}, \"title\": {\"type\": \"string\"},", "\"record_identifier\": \"PIPESTAT_RECORD_ID\", } CLASSES_BY_TYPE = { \"number\": float, \"integer\": int, \"object\": dict, \"image\":", "{\"type\": \"string\"}, \"title\": {\"type\": \"string\"}, }, 
\"required\": [\"path\", \"title\"], }, } ENV_VARS =", "\"config\": \"PIPESTAT_CONFIG\", \"results_file\": \"PIPESTAT_RESULTS_FILE\", \"schema\": \"PIPESTAT_RESULTS_SCHEMA\", \"status_schema\": \"PIPESTAT_SATUS_SCHEMA\", \"record_identifier\": \"PIPESTAT_RECORD_ID\", } CLASSES_BY_TYPE =", "\"status_schema\": \"PIPESTAT_SATUS_SCHEMA\", \"record_identifier\": \"PIPESTAT_RECORD_ID\", } CLASSES_BY_TYPE = { \"number\": float, \"integer\": int, \"object\":", "JSONB, \"boolean\": Boolean, } CFG_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"pipestat_config_schema.yaml\" ) STATUS_SCHEMA =", "REMOVE_CMD = \"remove\" RETRIEVE_CMD = \"retrieve\" STATUS_CMD = \"status\" SUBPARSER_MSGS = { REPORT_CMD:", "Boolean, } CFG_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"pipestat_config_schema.yaml\" ) STATUS_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)),", "\"string\"}, }, \"required\": [\"path\", \"thumbnail_path\", \"title\"], }, \"file\": { \"type\": \"object\", \"properties\": {", "\"properties\" SCHEMA_TYPE_KEY = \"type\" SCHEMA_DESC_KEY = \"description\" # DB column names ID =", "\"PIPESTAT_RESULTS_FILE\", \"schema\": \"PIPESTAT_RESULTS_SCHEMA\", \"status_schema\": \"PIPESTAT_SATUS_SCHEMA\", \"record_identifier\": \"PIPESTAT_RECORD_ID\", } CLASSES_BY_TYPE = { \"number\": float,", "DB_COLUMN_KEY = \"db_column\" DB_RELATIONSHIP_KEY = \"relationship\" DB_RELATIONSHIP_NAME_KEY = \"name\" DB_RELATIONSHIP_TABLE_KEY = \"table\" DB_RELATIONSHIP_COL_KEY", "result.\", INSPECT_CMD: \"Inspect a database.\", REMOVE_CMD: \"Remove a result.\", RETRIEVE_CMD: \"Retrieve a result.\",", "postgresql, oracle, or mssql CFG_DRIVER_KEY = \"driver\" DB_CREDENTIALS = [ CFG_HOST_KEY, CFG_PORT_KEY, CFG_PASSWORD_KEY,", "# DB column names ID = \"id\" RECORD_ID = \"record_identifier\" STATUS = \"status\"", "}, \"file\": { \"type\": \"object\", \"properties\": { \"path\": {\"type\": \"string\"}, \"title\": {\"type\": 
\"string\"},", "STATUS_KEY = \"_status\" STATUS_SCHEMA_KEY = \"_status_schema\" STATUS_SCHEMA_SOURCE_KEY = \"_status_schema_source\" STATUS_FILE_DIR = \"_status_file_dir\" RES_SCHEMAS_KEY", "CFG_USER_KEY, CFG_NAME_KEY, CFG_DIALECT_KEY, CFG_DRIVER_KEY, ] # object attribute names DB_ONLY_KEY = \"_database_only\" CONFIG_KEY", "= \"_orms\" DATA_KEY = \"_data\" NAME_KEY = \"_name\" FILE_KEY = \"_file\" RECORD_ID_KEY =", "\"image\": JSONB, \"file\": JSONB, \"string\": String(500), \"array\": JSONB, \"boolean\": Boolean, } CFG_SCHEMA =", "\"number\": Float, \"integer\": Integer, \"object\": JSONB, \"image\": JSONB, \"file\": JSONB, \"string\": String(500), \"array\":", "= \"_database_only\" CONFIG_KEY = \"_config\" SCHEMA_KEY = \"_schema\" STATUS_KEY = \"_status\" STATUS_SCHEMA_KEY =", "= [ DB_RELATIONSHIP_BACKREF_KEY, DB_RELATIONSHIP_COL_KEY, DB_RELATIONSHIP_NAME_KEY, DB_RELATIONSHIP_TABLE_KEY, ] # schema keys SCHEMA_PROP_KEY = \"properties\"", "{\"type\": \"string\"}, \"title\": {\"type\": \"string\"}, }, \"required\": [\"path\", \"thumbnail_path\", \"title\"], }, \"file\": {", "{ \"number\": float, \"integer\": int, \"object\": dict, \"image\": dict, \"file\": dict, \"string\": str,", "\"object\": dict, \"image\": dict, \"file\": dict, \"string\": str, \"array\": list, \"boolean\": bool, }", "= \"_status_schema\" STATUS_SCHEMA_SOURCE_KEY = \"_status_schema_source\" STATUS_FILE_DIR = \"_status_file_dir\" RES_SCHEMAS_KEY = \"_result_schemas\" DB_BASE_KEY =", "mysql, postgresql, oracle, or mssql CFG_DRIVER_KEY = \"driver\" DB_CREDENTIALS = [ CFG_HOST_KEY, CFG_PORT_KEY,", "CFG_DRIVER_KEY = \"driver\" DB_CREDENTIALS = [ CFG_HOST_KEY, CFG_PORT_KEY, CFG_PASSWORD_KEY, CFG_USER_KEY, CFG_NAME_KEY, CFG_DIALECT_KEY, CFG_DRIVER_KEY,", "}, } ENV_VARS = { \"namespace\": \"PIPESTAT_NAMESPACE\", \"config\": \"PIPESTAT_CONFIG\", \"results_file\": \"PIPESTAT_RESULTS_FILE\", \"schema\": \"PIPESTAT_RESULTS_SCHEMA\",", "String(500), \"array\": JSONB, \"boolean\": Boolean, } CFG_SCHEMA = 
os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"pipestat_config_schema.yaml\" )", "REPORT_CMD = \"report\" INSPECT_CMD = \"inspect\" REMOVE_CMD = \"remove\" RETRIEVE_CMD = \"retrieve\" STATUS_CMD", "\"port\" CFG_PASSWORD_KEY = \"password\" CFG_USER_KEY = \"user\" CFG_DIALECT_KEY = \"dialect\" # sqlite, mysql,", "} CFG_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"pipestat_config_schema.yaml\" ) STATUS_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\",", "= \"_schema\" STATUS_KEY = \"_status\" STATUS_SCHEMA_KEY = \"_status_schema\" STATUS_SCHEMA_SOURCE_KEY = \"_status_schema_source\" STATUS_FILE_DIR =", "\"integer\": Integer, \"object\": JSONB, \"image\": JSONB, \"file\": JSONB, \"string\": String(500), \"array\": JSONB, \"boolean\":", "\"lock.\" REPORT_CMD = \"report\" INSPECT_CMD = \"inspect\" REMOVE_CMD = \"remove\" RETRIEVE_CMD = \"retrieve\"", "RESERVED_COLNAMES = [ID, RECORD_ID] CANONICAL_TYPES = { \"image\": { \"type\": \"object\", \"properties\": {", "\"array\": JSONB, \"boolean\": Boolean, } CFG_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"pipestat_config_schema.yaml\" ) STATUS_SCHEMA", "dict, \"file\": dict, \"string\": str, \"array\": list, \"boolean\": bool, } SQL_CLASSES_BY_TYPE = {", "os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"pipestat_config_schema.yaml\" ) STATUS_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"status_schema.yaml\" ) STATUS_TABLE_SCHEMA =", "DB_BASE_KEY = \"_declarative_base\" DB_ORMS_KEY = \"_orms\" DATA_KEY = \"_data\" NAME_KEY = \"_name\" FILE_KEY", "] # object attribute names DB_ONLY_KEY = \"_database_only\" CONFIG_KEY = \"_config\" SCHEMA_KEY =", "} CLASSES_BY_TYPE = { \"number\": float, \"integer\": int, \"object\": dict, \"image\": dict, \"file\":", "\"_status_schema_source\" STATUS_FILE_DIR = \"_status_file_dir\" RES_SCHEMAS_KEY = \"_result_schemas\" 
DB_BASE_KEY = \"_declarative_base\" DB_ORMS_KEY = \"_orms\"", "CFG_NAME_KEY = \"name\" CFG_HOST_KEY = \"host\" CFG_PORT_KEY = \"port\" CFG_PASSWORD_KEY = \"password\" CFG_USER_KEY", "dict, \"string\": str, \"array\": list, \"boolean\": bool, } SQL_CLASSES_BY_TYPE = { \"number\": Float,", "{ REPORT_CMD: \"Report a result.\", INSPECT_CMD: \"Inspect a database.\", REMOVE_CMD: \"Remove a result.\",", "a database.\", REMOVE_CMD: \"Remove a result.\", RETRIEVE_CMD: \"Retrieve a result.\", STATUS_CMD: \"Manage pipeline", "= \"report\" INSPECT_CMD = \"inspect\" REMOVE_CMD = \"remove\" RETRIEVE_CMD = \"retrieve\" STATUS_CMD =", "\"_status_schema\" STATUS_SCHEMA_SOURCE_KEY = \"_status_schema_source\" STATUS_FILE_DIR = \"_status_file_dir\" RES_SCHEMAS_KEY = \"_result_schemas\" DB_BASE_KEY = \"_declarative_base\"", "= \"description\" # DB column names ID = \"id\" RECORD_ID = \"record_identifier\" STATUS", "{ \"path\": {\"type\": \"string\"}, \"title\": {\"type\": \"string\"}, }, \"required\": [\"path\", \"title\"], }, }", "Integer, \"object\": JSONB, \"image\": JSONB, \"file\": JSONB, \"string\": String(500), \"array\": JSONB, \"boolean\": Boolean,", "\"schema\": \"PIPESTAT_RESULTS_SCHEMA\", \"status_schema\": \"PIPESTAT_SATUS_SCHEMA\", \"record_identifier\": \"PIPESTAT_RECORD_ID\", } CLASSES_BY_TYPE = { \"number\": float, \"integer\":", "RECORD_ID] CANONICAL_TYPES = { \"image\": { \"type\": \"object\", \"properties\": { \"path\": {\"type\": \"string\"},", "} SQL_CLASSES_BY_TYPE = { \"number\": Float, \"integer\": Integer, \"object\": JSONB, \"image\": JSONB, \"file\":", "\"retrieve\" STATUS_CMD = \"status\" SUBPARSER_MSGS = { REPORT_CMD: \"Report a result.\", INSPECT_CMD: \"Inspect", "REPORT_CMD: \"Report a result.\", INSPECT_CMD: \"Inspect a database.\", REMOVE_CMD: \"Remove a result.\", RETRIEVE_CMD:", "# sqlite, mysql, postgresql, oracle, or mssql CFG_DRIVER_KEY = \"driver\" DB_CREDENTIALS = [", "= \"driver\" DB_CREDENTIALS = [ CFG_HOST_KEY, CFG_PORT_KEY, CFG_PASSWORD_KEY, 
CFG_USER_KEY, CFG_NAME_KEY, CFG_DIALECT_KEY, CFG_DRIVER_KEY, ]", "CFG_HOST_KEY = \"host\" CFG_PORT_KEY = \"port\" CFG_PASSWORD_KEY = \"password\" CFG_USER_KEY = \"user\" CFG_DIALECT_KEY", "from sqlalchemy.dialects.postgresql.json import JSONB from sqlalchemy.types import ARRAY, JSON, Boolean, Float, Integer, String", "[ DB_RELATIONSHIP_BACKREF_KEY, DB_RELATIONSHIP_COL_KEY, DB_RELATIONSHIP_NAME_KEY, DB_RELATIONSHIP_TABLE_KEY, ] # schema keys SCHEMA_PROP_KEY = \"properties\" SCHEMA_TYPE_KEY", "= \"_result_schemas\" DB_BASE_KEY = \"_declarative_base\" DB_ORMS_KEY = \"_orms\" DATA_KEY = \"_data\" NAME_KEY =", "DB_RELATIONSHIP_NAME_KEY = \"name\" DB_RELATIONSHIP_TABLE_KEY = \"table\" DB_RELATIONSHIP_COL_KEY = \"column\" DB_RELATIONSHIP_BACKREF_KEY = \"backref\" DB_RELATIONSHIP_ELEMENTS", "= { REPORT_CMD: \"Report a result.\", INSPECT_CMD: \"Inspect a database.\", REMOVE_CMD: \"Remove a", "\"namespace\": \"PIPESTAT_NAMESPACE\", \"config\": \"PIPESTAT_CONFIG\", \"results_file\": \"PIPESTAT_RESULTS_FILE\", \"schema\": \"PIPESTAT_RESULTS_SCHEMA\", \"status_schema\": \"PIPESTAT_SATUS_SCHEMA\", \"record_identifier\": \"PIPESTAT_RECORD_ID\", }", "\"properties\": { \"path\": {\"type\": \"string\"}, \"thumbnail_path\": {\"type\": \"string\"}, \"title\": {\"type\": \"string\"}, }, \"required\":", "= \"set\" STATUS_SUBPARSER_MESSAGES = { STATUS_SET_CMD: \"Set status.\", STATUS_GET_CMD: \"Get status.\", } DOC_URL", "CONFIG_KEY = \"_config\" SCHEMA_KEY = \"_schema\" STATUS_KEY = \"_status\" STATUS_SCHEMA_KEY = \"_status_schema\" STATUS_SCHEMA_SOURCE_KEY", "[ID, RECORD_ID] CANONICAL_TYPES = { \"image\": { \"type\": \"object\", \"properties\": { \"path\": {\"type\":", "{\"type\": \"string\"}, }, \"required\": [\"path\", \"title\"], }, } ENV_VARS = { \"namespace\": \"PIPESTAT_NAMESPACE\",", "\"string\"}, \"title\": {\"type\": \"string\"}, }, \"required\": [\"path\", \"thumbnail_path\", \"title\"], }, \"file\": { \"type\":", "list, \"boolean\": bool, } SQL_CLASSES_BY_TYPE = { 
\"number\": Float, \"integer\": Integer, \"object\": JSONB,", "STATUS_SET_CMD = \"set\" STATUS_SUBPARSER_MESSAGES = { STATUS_SET_CMD: \"Set status.\", STATUS_GET_CMD: \"Get status.\", }", "\"PIPESTAT_RESULTS_SCHEMA\", \"status_schema\": \"PIPESTAT_SATUS_SCHEMA\", \"record_identifier\": \"PIPESTAT_RECORD_ID\", } CLASSES_BY_TYPE = { \"number\": float, \"integer\": int,", "= \"record_identifier\" STATUS = \"status\" RESERVED_COLNAMES = [ID, RECORD_ID] CANONICAL_TYPES = { \"image\":", "= \"inspect\" REMOVE_CMD = \"remove\" RETRIEVE_CMD = \"retrieve\" STATUS_CMD = \"status\" SUBPARSER_MSGS =", "} DOC_URL = \"http://pipestat.databio.org/en/latest/db_config/\" # DB config keys CFG_DATABASE_KEY = \"database\" CFG_NAME_KEY =", "\"Remove a result.\", RETRIEVE_CMD: \"Retrieve a result.\", STATUS_CMD: \"Manage pipeline status.\", } STATUS_GET_CMD", "\"_name\" FILE_KEY = \"_file\" RECORD_ID_KEY = \"_record_id\" DB_SESSION_KEY = \"_db_session\" DB_SCOPED_SESSION_KEY = \"_db_scoped_session\"", "config keys CFG_DATABASE_KEY = \"database\" CFG_NAME_KEY = \"name\" CFG_HOST_KEY = \"host\" CFG_PORT_KEY =", "STATUS = \"status\" RESERVED_COLNAMES = [ID, RECORD_ID] CANONICAL_TYPES = { \"image\": { \"type\":", "\"results_file\": \"PIPESTAT_RESULTS_FILE\", \"schema\": \"PIPESTAT_RESULTS_SCHEMA\", \"status_schema\": \"PIPESTAT_SATUS_SCHEMA\", \"record_identifier\": \"PIPESTAT_RECORD_ID\", } CLASSES_BY_TYPE = { \"number\":", "CFG_NAME_KEY, CFG_DIALECT_KEY, CFG_DRIVER_KEY, ] # object attribute names DB_ONLY_KEY = \"_database_only\" CONFIG_KEY =", "= \"db_column\" DB_RELATIONSHIP_KEY = \"relationship\" DB_RELATIONSHIP_NAME_KEY = \"name\" DB_RELATIONSHIP_TABLE_KEY = \"table\" DB_RELATIONSHIP_COL_KEY =", "= \"_config\" SCHEMA_KEY = \"_schema\" STATUS_KEY = \"_status\" STATUS_SCHEMA_KEY = \"_status_schema\" STATUS_SCHEMA_SOURCE_KEY =", "STATUS_SUBPARSER_MESSAGES = { STATUS_SET_CMD: \"Set status.\", STATUS_GET_CMD: \"Get status.\", } DOC_URL = \"http://pipestat.databio.org/en/latest/db_config/\"", 
"STATUS_SCHEMA_KEY = \"_status_schema\" STATUS_SCHEMA_SOURCE_KEY = \"_status_schema_source\" STATUS_FILE_DIR = \"_status_file_dir\" RES_SCHEMAS_KEY = \"_result_schemas\" DB_BASE_KEY", "\"_db_engine\" HIGHLIGHTED_KEY = \"_highlighted\" DB_COLUMN_KEY = \"db_column\" DB_RELATIONSHIP_KEY = \"relationship\" DB_RELATIONSHIP_NAME_KEY = \"name\"", "CFG_DIALECT_KEY, CFG_DRIVER_KEY, ] # object attribute names DB_ONLY_KEY = \"_database_only\" CONFIG_KEY = \"_config\"", "\"thumbnail_path\": {\"type\": \"string\"}, \"title\": {\"type\": \"string\"}, }, \"required\": [\"path\", \"thumbnail_path\", \"title\"], }, \"file\":", "SCHEMA_PROP_KEY = \"properties\" SCHEMA_TYPE_KEY = \"type\" SCHEMA_DESC_KEY = \"description\" # DB column names", "\"file\": dict, \"string\": str, \"array\": list, \"boolean\": bool, } SQL_CLASSES_BY_TYPE = { \"number\":", "\"driver\" DB_CREDENTIALS = [ CFG_HOST_KEY, CFG_PORT_KEY, CFG_PASSWORD_KEY, CFG_USER_KEY, CFG_NAME_KEY, CFG_DIALECT_KEY, CFG_DRIVER_KEY, ] #", "\"pipestat_config_schema.yaml\" ) STATUS_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"status_schema.yaml\" ) STATUS_TABLE_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)),", "Float, Integer, String PKG_NAME = \"pipestat\" LOCK_PREFIX = \"lock.\" REPORT_CMD = \"report\" INSPECT_CMD", "DB_RELATIONSHIP_COL_KEY, DB_RELATIONSHIP_NAME_KEY, DB_RELATIONSHIP_TABLE_KEY, ] # schema keys SCHEMA_PROP_KEY = \"properties\" SCHEMA_TYPE_KEY = \"type\"", "JSONB, \"image\": JSONB, \"file\": JSONB, \"string\": String(500), \"array\": JSONB, \"boolean\": Boolean, } CFG_SCHEMA", "ARRAY, JSON, Boolean, Float, Integer, String PKG_NAME = \"pipestat\" LOCK_PREFIX = \"lock.\" REPORT_CMD", "CFG_PASSWORD_KEY = \"password\" CFG_USER_KEY = \"user\" CFG_DIALECT_KEY = \"dialect\" # sqlite, mysql, postgresql,", "\"PIPESTAT_SATUS_SCHEMA\", \"record_identifier\": \"PIPESTAT_RECORD_ID\", } CLASSES_BY_TYPE = { \"number\": float, \"integer\": int, \"object\": dict,", "STATUS_CMD = 
\"status\" SUBPARSER_MSGS = { REPORT_CMD: \"Report a result.\", INSPECT_CMD: \"Inspect a", "object attribute names DB_ONLY_KEY = \"_database_only\" CONFIG_KEY = \"_config\" SCHEMA_KEY = \"_schema\" STATUS_KEY", "\"type\": \"object\", \"properties\": { \"path\": {\"type\": \"string\"}, \"title\": {\"type\": \"string\"}, }, \"required\": [\"path\",", "DB_RELATIONSHIP_COL_KEY = \"column\" DB_RELATIONSHIP_BACKREF_KEY = \"backref\" DB_RELATIONSHIP_ELEMENTS = [ DB_RELATIONSHIP_BACKREF_KEY, DB_RELATIONSHIP_COL_KEY, DB_RELATIONSHIP_NAME_KEY, DB_RELATIONSHIP_TABLE_KEY,", "\"Report a result.\", INSPECT_CMD: \"Inspect a database.\", REMOVE_CMD: \"Remove a result.\", RETRIEVE_CMD: \"Retrieve", "= \"lock.\" REPORT_CMD = \"report\" INSPECT_CMD = \"inspect\" REMOVE_CMD = \"remove\" RETRIEVE_CMD =", "DB_RELATIONSHIP_ELEMENTS = [ DB_RELATIONSHIP_BACKREF_KEY, DB_RELATIONSHIP_COL_KEY, DB_RELATIONSHIP_NAME_KEY, DB_RELATIONSHIP_TABLE_KEY, ] # schema keys SCHEMA_PROP_KEY =", "STATUS_SET_CMD: \"Set status.\", STATUS_GET_CMD: \"Get status.\", } DOC_URL = \"http://pipestat.databio.org/en/latest/db_config/\" # DB config", "= \"password\" CFG_USER_KEY = \"user\" CFG_DIALECT_KEY = \"dialect\" # sqlite, mysql, postgresql, oracle,", "attribute names DB_ONLY_KEY = \"_database_only\" CONFIG_KEY = \"_config\" SCHEMA_KEY = \"_schema\" STATUS_KEY =", "JSONB from sqlalchemy.types import ARRAY, JSON, Boolean, Float, Integer, String PKG_NAME = \"pipestat\"", "\"record_identifier\" STATUS = \"status\" RESERVED_COLNAMES = [ID, RECORD_ID] CANONICAL_TYPES = { \"image\": {", "\"name\" CFG_HOST_KEY = \"host\" CFG_PORT_KEY = \"port\" CFG_PASSWORD_KEY = \"password\" CFG_USER_KEY = \"user\"", "= \"column\" DB_RELATIONSHIP_BACKREF_KEY = \"backref\" DB_RELATIONSHIP_ELEMENTS = [ DB_RELATIONSHIP_BACKREF_KEY, DB_RELATIONSHIP_COL_KEY, DB_RELATIONSHIP_NAME_KEY, DB_RELATIONSHIP_TABLE_KEY, ]", "\"_result_schemas\" DB_BASE_KEY = \"_declarative_base\" DB_ORMS_KEY = \"_orms\" DATA_KEY = \"_data\" NAME_KEY = \"_name\"", "{ 
\"image\": { \"type\": \"object\", \"properties\": { \"path\": {\"type\": \"string\"}, \"thumbnail_path\": {\"type\": \"string\"},", "= \"dialect\" # sqlite, mysql, postgresql, oracle, or mssql CFG_DRIVER_KEY = \"driver\" DB_CREDENTIALS", "RES_SCHEMAS_KEY = \"_result_schemas\" DB_BASE_KEY = \"_declarative_base\" DB_ORMS_KEY = \"_orms\" DATA_KEY = \"_data\" NAME_KEY", "str, \"array\": list, \"boolean\": bool, } SQL_CLASSES_BY_TYPE = { \"number\": Float, \"integer\": Integer,", "CFG_PORT_KEY, CFG_PASSWORD_KEY, CFG_USER_KEY, CFG_NAME_KEY, CFG_DIALECT_KEY, CFG_DRIVER_KEY, ] # object attribute names DB_ONLY_KEY =", "\"path\": {\"type\": \"string\"}, \"title\": {\"type\": \"string\"}, }, \"required\": [\"path\", \"title\"], }, } ENV_VARS", "DB_SESSION_KEY = \"_db_session\" DB_SCOPED_SESSION_KEY = \"_db_scoped_session\" DB_ENGINE_KEY = \"_db_engine\" HIGHLIGHTED_KEY = \"_highlighted\" DB_COLUMN_KEY", "Boolean, Float, Integer, String PKG_NAME = \"pipestat\" LOCK_PREFIX = \"lock.\" REPORT_CMD = \"report\"", "DB_ONLY_KEY = \"_database_only\" CONFIG_KEY = \"_config\" SCHEMA_KEY = \"_schema\" STATUS_KEY = \"_status\" STATUS_SCHEMA_KEY", "= [ CFG_HOST_KEY, CFG_PORT_KEY, CFG_PASSWORD_KEY, CFG_USER_KEY, CFG_NAME_KEY, CFG_DIALECT_KEY, CFG_DRIVER_KEY, ] # object attribute", "{ STATUS_SET_CMD: \"Set status.\", STATUS_GET_CMD: \"Get status.\", } DOC_URL = \"http://pipestat.databio.org/en/latest/db_config/\" # DB", "\"thumbnail_path\", \"title\"], }, \"file\": { \"type\": \"object\", \"properties\": { \"path\": {\"type\": \"string\"}, \"title\":", "{ \"type\": \"object\", \"properties\": { \"path\": {\"type\": \"string\"}, \"title\": {\"type\": \"string\"}, }, \"required\":", "= \"remove\" RETRIEVE_CMD = \"retrieve\" STATUS_CMD = \"status\" SUBPARSER_MSGS = { REPORT_CMD: \"Report", "\"string\": str, \"array\": list, \"boolean\": bool, } SQL_CLASSES_BY_TYPE = { \"number\": Float, \"integer\":", "DOC_URL = \"http://pipestat.databio.org/en/latest/db_config/\" # DB config keys 
CFG_DATABASE_KEY = \"database\" CFG_NAME_KEY = \"name\"", "\"required\": [\"path\", \"thumbnail_path\", \"title\"], }, \"file\": { \"type\": \"object\", \"properties\": { \"path\": {\"type\":", "= \"status\" SUBPARSER_MSGS = { REPORT_CMD: \"Report a result.\", INSPECT_CMD: \"Inspect a database.\",", "\"boolean\": bool, } SQL_CLASSES_BY_TYPE = { \"number\": Float, \"integer\": Integer, \"object\": JSONB, \"image\":", "\"get\" STATUS_SET_CMD = \"set\" STATUS_SUBPARSER_MESSAGES = { STATUS_SET_CMD: \"Set status.\", STATUS_GET_CMD: \"Get status.\",", "\"boolean\": Boolean, } CFG_SCHEMA = os.path.join( os.path.dirname(os.path.abspath(__file__)), \"schemas\", \"pipestat_config_schema.yaml\" ) STATUS_SCHEMA = os.path.join(", "\"title\"], }, \"file\": { \"type\": \"object\", \"properties\": { \"path\": {\"type\": \"string\"}, \"title\": {\"type\":", "} ENV_VARS = { \"namespace\": \"PIPESTAT_NAMESPACE\", \"config\": \"PIPESTAT_CONFIG\", \"results_file\": \"PIPESTAT_RESULTS_FILE\", \"schema\": \"PIPESTAT_RESULTS_SCHEMA\", \"status_schema\":", "\"pipestat\" LOCK_PREFIX = \"lock.\" REPORT_CMD = \"report\" INSPECT_CMD = \"inspect\" REMOVE_CMD = \"remove\"", "a result.\", STATUS_CMD: \"Manage pipeline status.\", } STATUS_GET_CMD = \"get\" STATUS_SET_CMD = \"set\"" ]
[ "[ afhsb, cdc, covid_hosp_facility_lookup, covid_hosp_facility, covid_hosp_state_timeseries, covidcast_nowcast, covidcast_meta, covidcast, delphi, dengue_nowcast, dengue_sensors, ecdc_ili,", "nidss_dengue, nidss_flu, norostat, nowcast, paho_dengue, quidel, sensors, twitter, wiki, signal_dashboard_status, signal_dashboard_coverage, ] __all__", "import ( afhsb, cdc, covid_hosp_facility_lookup, covid_hosp_facility, covid_hosp_state_timeseries, covidcast_nowcast, covidcast_meta, covidcast, delphi, dengue_nowcast, dengue_sensors,", "kcdc_ili, meta_afhsb, meta_norostat, meta, nidss_dengue, nidss_flu, norostat, nowcast, paho_dengue, quidel, sensors, twitter, wiki,", "meta_afhsb, meta_norostat, meta, nidss_dengue, nidss_flu, norostat, nowcast, paho_dengue, quidel, sensors, twitter, wiki, signal_dashboard_status,", "covidcast, delphi, dengue_nowcast, dengue_sensors, ecdc_ili, flusurv, fluview_clinicial, fluview_meta, fluview, gft, ght, ilinet, kcdc_ili,", "norostat, nowcast, paho_dengue, quidel, sensors, twitter, wiki, signal_dashboard_status, signal_dashboard_coverage, ] __all__ = [\"endpoints\"]", "fluview_meta, fluview, gft, ght, ilinet, kcdc_ili, meta_afhsb, meta_norostat, meta, nidss_dengue, nidss_flu, norostat, nowcast,", "gft, ght, ilinet, kcdc_ili, meta_afhsb, meta_norostat, meta, nidss_dengue, nidss_flu, norostat, nowcast, paho_dengue, quidel,", "endpoints = [ afhsb, cdc, covid_hosp_facility_lookup, covid_hosp_facility, covid_hosp_state_timeseries, covidcast_nowcast, covidcast_meta, covidcast, delphi, dengue_nowcast,", "meta_norostat, meta, nidss_dengue, nidss_flu, norostat, nowcast, paho_dengue, quidel, sensors, twitter, wiki, signal_dashboard_status, signal_dashboard_coverage,", "afhsb, cdc, covid_hosp_facility_lookup, covid_hosp_facility, covid_hosp_state_timeseries, covidcast_nowcast, covidcast_meta, covidcast, delphi, dengue_nowcast, dengue_sensors, ecdc_ili, flusurv,", "signal_dashboard_coverage, ) endpoints = [ afhsb, cdc, covid_hosp_facility_lookup, 
covid_hosp_facility, covid_hosp_state_timeseries, covidcast_nowcast, covidcast_meta, covidcast,", "covid_hosp_facility, covid_hosp_state_timeseries, covidcast_nowcast, covidcast_meta, covidcast, delphi, dengue_nowcast, dengue_sensors, ecdc_ili, flusurv, fluview_clinicial, fluview_meta, fluview,", "dengue_nowcast, dengue_sensors, ecdc_ili, flusurv, fluview_clinicial, fluview_meta, fluview, gft, ght, ilinet, kcdc_ili, meta_afhsb, meta_norostat,", ") endpoints = [ afhsb, cdc, covid_hosp_facility_lookup, covid_hosp_facility, covid_hosp_state_timeseries, covidcast_nowcast, covidcast_meta, covidcast, delphi,", "fluview_clinicial, fluview_meta, fluview, gft, ght, ilinet, kcdc_ili, meta_afhsb, meta_norostat, meta, nidss_dengue, nidss_flu, norostat,", "covid_hosp_state_timeseries, covidcast_nowcast, covidcast_meta, covidcast, delphi, dengue_nowcast, dengue_sensors, ecdc_ili, flusurv, fluview_clinicial, fluview_meta, fluview, gft,", "from . import ( afhsb, cdc, covid_hosp_facility_lookup, covid_hosp_facility, covid_hosp_state_timeseries, covidcast_nowcast, covidcast_meta, covidcast, delphi,", "sensors, twitter, wiki, signal_dashboard_status, signal_dashboard_coverage, ) endpoints = [ afhsb, cdc, covid_hosp_facility_lookup, covid_hosp_facility,", ". 
import ( afhsb, cdc, covid_hosp_facility_lookup, covid_hosp_facility, covid_hosp_state_timeseries, covidcast_nowcast, covidcast_meta, covidcast, delphi, dengue_nowcast,", "( afhsb, cdc, covid_hosp_facility_lookup, covid_hosp_facility, covid_hosp_state_timeseries, covidcast_nowcast, covidcast_meta, covidcast, delphi, dengue_nowcast, dengue_sensors, ecdc_ili,", "dengue_sensors, ecdc_ili, flusurv, fluview_clinicial, fluview_meta, fluview, gft, ght, ilinet, kcdc_ili, meta_afhsb, meta_norostat, meta,", "wiki, signal_dashboard_status, signal_dashboard_coverage, ) endpoints = [ afhsb, cdc, covid_hosp_facility_lookup, covid_hosp_facility, covid_hosp_state_timeseries, covidcast_nowcast,", "cdc, covid_hosp_facility_lookup, covid_hosp_facility, covid_hosp_state_timeseries, covidcast_nowcast, covidcast_meta, covidcast, delphi, dengue_nowcast, dengue_sensors, ecdc_ili, flusurv, fluview_clinicial,", "nidss_flu, norostat, nowcast, paho_dengue, quidel, sensors, twitter, wiki, signal_dashboard_status, signal_dashboard_coverage, ) endpoints =", "meta, nidss_dengue, nidss_flu, norostat, nowcast, paho_dengue, quidel, sensors, twitter, wiki, signal_dashboard_status, signal_dashboard_coverage, ]", "covid_hosp_facility_lookup, covid_hosp_facility, covid_hosp_state_timeseries, covidcast_nowcast, covidcast_meta, covidcast, delphi, dengue_nowcast, dengue_sensors, ecdc_ili, flusurv, fluview_clinicial, fluview_meta,", "covidcast_meta, covidcast, delphi, dengue_nowcast, dengue_sensors, ecdc_ili, flusurv, fluview_clinicial, fluview_meta, fluview, gft, ght, ilinet,", "meta, nidss_dengue, nidss_flu, norostat, nowcast, paho_dengue, quidel, sensors, twitter, wiki, signal_dashboard_status, signal_dashboard_coverage, )", "nidss_dengue, nidss_flu, norostat, nowcast, paho_dengue, quidel, sensors, twitter, wiki, signal_dashboard_status, signal_dashboard_coverage, ) endpoints", "covidcast_nowcast, covidcast_meta, covidcast, delphi, dengue_nowcast, dengue_sensors, ecdc_ili, flusurv, fluview_clinicial, 
fluview_meta, fluview, gft, ght,", "nowcast, paho_dengue, quidel, sensors, twitter, wiki, signal_dashboard_status, signal_dashboard_coverage, ) endpoints = [ afhsb,", "norostat, nowcast, paho_dengue, quidel, sensors, twitter, wiki, signal_dashboard_status, signal_dashboard_coverage, ) endpoints = [", "nidss_flu, norostat, nowcast, paho_dengue, quidel, sensors, twitter, wiki, signal_dashboard_status, signal_dashboard_coverage, ] __all__ =", "quidel, sensors, twitter, wiki, signal_dashboard_status, signal_dashboard_coverage, ) endpoints = [ afhsb, cdc, covid_hosp_facility_lookup,", "ecdc_ili, flusurv, fluview_clinicial, fluview_meta, fluview, gft, ght, ilinet, kcdc_ili, meta_afhsb, meta_norostat, meta, nidss_dengue,", "ilinet, kcdc_ili, meta_afhsb, meta_norostat, meta, nidss_dengue, nidss_flu, norostat, nowcast, paho_dengue, quidel, sensors, twitter,", "twitter, wiki, signal_dashboard_status, signal_dashboard_coverage, ) endpoints = [ afhsb, cdc, covid_hosp_facility_lookup, covid_hosp_facility, covid_hosp_state_timeseries,", "fluview, gft, ght, ilinet, kcdc_ili, meta_afhsb, meta_norostat, meta, nidss_dengue, nidss_flu, norostat, nowcast, paho_dengue,", "ght, ilinet, kcdc_ili, meta_afhsb, meta_norostat, meta, nidss_dengue, nidss_flu, norostat, nowcast, paho_dengue, quidel, sensors,", "flusurv, fluview_clinicial, fluview_meta, fluview, gft, ght, ilinet, kcdc_ili, meta_afhsb, meta_norostat, meta, nidss_dengue, nidss_flu,", "delphi, dengue_nowcast, dengue_sensors, ecdc_ili, flusurv, fluview_clinicial, fluview_meta, fluview, gft, ght, ilinet, kcdc_ili, meta_afhsb,", "signal_dashboard_status, signal_dashboard_coverage, ) endpoints = [ afhsb, cdc, covid_hosp_facility_lookup, covid_hosp_facility, covid_hosp_state_timeseries, covidcast_nowcast, covidcast_meta,", "= [ afhsb, cdc, covid_hosp_facility_lookup, covid_hosp_facility, covid_hosp_state_timeseries, covidcast_nowcast, covidcast_meta, covidcast, delphi, dengue_nowcast, dengue_sensors,", "paho_dengue, quidel, sensors, 
twitter, wiki, signal_dashboard_status, signal_dashboard_coverage, ) endpoints = [ afhsb, cdc," ]
[ "id): pass class LoggingLedController(LedController): def reset(self): logging.info('Reset') def set(self, id): logging.info('set {}'.format(id)) #", "connected LEDs class I2CLedController(LoggingLedController): def __init__(self): self.bus = smbus2.SMBus(1) self.bus.write_byte_data(0x20, 0x00, 0x00) self.bus.write_byte_data(0x20,", "job(): address = get_active_address() if address < 0: controller.reset() else: controller.set(address) def get_mock_address():", "PROD_ENV = 'http://10.24.6.35/api/v1/display' url = TEST_ENV color = (0, 0, 255) controller =", "r.json() if (data['state'] != 'OPERATION'): logging.debug('Not operation state.') return -1 if 'operation' not", "super(WS2812LedController, self).reset() self._pixels.fill((0, 0, 0)) self._pixels.show() def set(self, id): super(WS2812LedController, self).set(id) self._pixels.fill((0, 0,", "import BlockingScheduler class LedController: def reset(self): pass def set(self, id): pass class LoggingLedController(LedController):", "not in realEstate: logging.debug('No folderAddress.') return -1 folderAddress = int(realEstate['folderAddress']) return folderAddress except", "def reset(self): super(I2CLedController, self).reset() self.bus.write_byte_data(0x20, 0x14, 0x00) self.bus.write_byte_data(0x20, 0x15, 0x00) def set(self, id):", "{}'.format(id)) # Controller for I2C connected LEDs class I2CLedController(LoggingLedController): def __init__(self): self.bus =", "controller.reset() else: controller.set(address) def get_mock_address(): return random.randint(-1, 100) def get_active_address(): try: r =", "id % 8 self.bus.write_byte_data(0x20, register, bitmask) # Controller for WS2812 connected LEDs class", "100) def get_active_address(): try: r = requests.get(url, timeout=2) data = r.json() if (data['state']", "logging.info('Starting process.') scheduler = BlockingScheduler() scheduler.add_job(job, 'interval', seconds=5) try: scheduler.start() except (KeyboardInterrupt): controller.reset()", "BASIC OPTIONS 
logging.basicConfig(level=logging.INFO) TEST_ENV = 'http://192.168.0.199:8080/v1/display' PROD_ENV = 'http://10.24.6.35/api/v1/display' url = TEST_ENV color", "smbus2.SMBus(1) self.bus.write_byte_data(0x20, 0x00, 0x00) self.bus.write_byte_data(0x20, 0x01, 0x00) def reset(self): super(I2CLedController, self).reset() self.bus.write_byte_data(0x20, 0x14,", "logging.debug('No folderAddress.') return -1 folderAddress = int(realEstate['folderAddress']) return folderAddress except Exception as e:", "folderAddress except Exception as e: logging.warn('Exception when getting data.') logging.warn(e) return -1 def", "logging import random import requests import board import neopixel import smbus2 from apscheduler.schedulers.blocking", "import board import neopixel import smbus2 from apscheduler.schedulers.blocking import BlockingScheduler class LedController: def", "def reset(self): super(WS2812LedController, self).reset() self._pixels.fill((0, 0, 0)) self._pixels.show() def set(self, id): super(WS2812LedController, self).set(id)", "def set(self, id): logging.info('set {}'.format(id)) # Controller for I2C connected LEDs class I2CLedController(LoggingLedController):", "logging.debug('Not operation state.') return -1 if 'operation' not in data: logging.debug('No operation.') return", "= smbus2.SMBus(1) self.bus.write_byte_data(0x20, 0x00, 0x00) self.bus.write_byte_data(0x20, 0x01, 0x00) def reset(self): super(I2CLedController, self).reset() self.bus.write_byte_data(0x20,", "144, auto_write=False) self._pixels.fill((0, 0, 0)) self._pixels.show() def reset(self): super(WS2812LedController, self).reset() self._pixels.fill((0, 0, 0))", "class LedController: def reset(self): pass def set(self, id): pass class LoggingLedController(LedController): def reset(self):", "if 'folderAddress' not in realEstate: logging.debug('No folderAddress.') return -1 folderAddress = int(realEstate['folderAddress']) return", "(data['state'] != 'OPERATION'): logging.debug('Not operation state.') return -1 
if 'operation' not in data:", "I2C connected LEDs class I2CLedController(LoggingLedController): def __init__(self): self.bus = smbus2.SMBus(1) self.bus.write_byte_data(0x20, 0x00, 0x00)", "= 0x14 if id / 8 > 0: register = 0x15 bitmask =", "> 0: register = 0x15 bitmask = id % 8 self.bus.write_byte_data(0x20, register, bitmask)", "0, 0)) self._pixels.show() def reset(self): super(WS2812LedController, self).reset() self._pixels.fill((0, 0, 0)) self._pixels.show() def set(self,", "operation state.') return -1 if 'operation' not in data: logging.debug('No operation.') return -1", "data: logging.debug('No operation.') return -1 operation = data['operation'] if 'realEstate' not in operation:", "self.bus = smbus2.SMBus(1) self.bus.write_byte_data(0x20, 0x00, 0x00) self.bus.write_byte_data(0x20, 0x01, 0x00) def reset(self): super(I2CLedController, self).reset()", "= self._color self._pixels.show() # BASIC OPTIONS logging.basicConfig(level=logging.INFO) TEST_ENV = 'http://192.168.0.199:8080/v1/display' PROD_ENV = 'http://10.24.6.35/api/v1/display'", "= (0, 0, 255) controller = WS2812LedController(color) def job(): address = get_active_address() if", "0x00) def set(self, id): super(I2CLedController, self).set(id) register = 0x14 if id / 8", "class WS2812LedController(LedController): def __init__(self, color): self._color = color self._pixels = neopixel.NeoPixel(board.D18, 144, auto_write=False)", "OPTIONS logging.basicConfig(level=logging.INFO) TEST_ENV = 'http://192.168.0.199:8080/v1/display' PROD_ENV = 'http://10.24.6.35/api/v1/display' url = TEST_ENV color =", "r = requests.get(url, timeout=2) data = r.json() if (data['state'] != 'OPERATION'): logging.debug('Not operation", "def init(): logging.info('Starting process.') scheduler = BlockingScheduler() scheduler.add_job(job, 'interval', seconds=5) try: scheduler.start() except", "pass class LoggingLedController(LedController): def reset(self): logging.info('Reset') def set(self, id): logging.info('set {}'.format(id)) # 
Controller", "= 'http://192.168.0.199:8080/v1/display' PROD_ENV = 'http://10.24.6.35/api/v1/display' url = TEST_ENV color = (0, 0, 255)", "id): super(WS2812LedController, self).set(id) self._pixels.fill((0, 0, 0)) self._pixels[id] = self._color self._pixels.show() # BASIC OPTIONS", "BlockingScheduler class LedController: def reset(self): pass def set(self, id): pass class LoggingLedController(LedController): def", "pass def set(self, id): pass class LoggingLedController(LedController): def reset(self): logging.info('Reset') def set(self, id):", "TEST_ENV color = (0, 0, 255) controller = WS2812LedController(color) def job(): address =", "self).reset() self._pixels.fill((0, 0, 0)) self._pixels.show() def set(self, id): super(WS2812LedController, self).set(id) self._pixels.fill((0, 0, 0))", "def set(self, id): pass class LoggingLedController(LedController): def reset(self): logging.info('Reset') def set(self, id): logging.info('set", "0x14 if id / 8 > 0: register = 0x15 bitmask = id", "folderAddress = int(realEstate['folderAddress']) return folderAddress except Exception as e: logging.warn('Exception when getting data.')", "0x01, 0x00) def reset(self): super(I2CLedController, self).reset() self.bus.write_byte_data(0x20, 0x14, 0x00) self.bus.write_byte_data(0x20, 0x15, 0x00) def", "= r.json() if (data['state'] != 'OPERATION'): logging.debug('Not operation state.') return -1 if 'operation'", "'realEstate' not in operation: logging.debug('No realEstate.') return -1 realEstate = operation['realEstate'] if 'folderAddress'", "WS2812LedController(color) def job(): address = get_active_address() if address < 0: controller.reset() else: controller.set(address)", "'http://10.24.6.35/api/v1/display' url = TEST_ENV color = (0, 0, 255) controller = WS2812LedController(color) def", "= BlockingScheduler() scheduler.add_job(job, 'interval', seconds=5) try: scheduler.start() except (KeyboardInterrupt): controller.reset() logging.info('Stopping process.') if", "# BASIC OPTIONS 
logging.basicConfig(level=logging.INFO) TEST_ENV = 'http://192.168.0.199:8080/v1/display' PROD_ENV = 'http://10.24.6.35/api/v1/display' url = TEST_ENV", "'folderAddress' not in realEstate: logging.debug('No folderAddress.') return -1 folderAddress = int(realEstate['folderAddress']) return folderAddress", "= data['operation'] if 'realEstate' not in operation: logging.debug('No realEstate.') return -1 realEstate =", "not in data: logging.debug('No operation.') return -1 operation = data['operation'] if 'realEstate' not", "neopixel.NeoPixel(board.D18, 144, auto_write=False) self._pixels.fill((0, 0, 0)) self._pixels.show() def reset(self): super(WS2812LedController, self).reset() self._pixels.fill((0, 0,", "init(): logging.info('Starting process.') scheduler = BlockingScheduler() scheduler.add_job(job, 'interval', seconds=5) try: scheduler.start() except (KeyboardInterrupt):", "data['operation'] if 'realEstate' not in operation: logging.debug('No realEstate.') return -1 realEstate = operation['realEstate']", "scheduler = BlockingScheduler() scheduler.add_job(job, 'interval', seconds=5) try: scheduler.start() except (KeyboardInterrupt): controller.reset() logging.info('Stopping process.')", "% 8 self.bus.write_byte_data(0x20, register, bitmask) # Controller for WS2812 connected LEDs class WS2812LedController(LedController):", "WS2812 connected LEDs class WS2812LedController(LedController): def __init__(self, color): self._color = color self._pixels =", "process.') scheduler = BlockingScheduler() scheduler.add_job(job, 'interval', seconds=5) try: scheduler.start() except (KeyboardInterrupt): controller.reset() logging.info('Stopping", "id / 8 > 0: register = 0x15 bitmask = id % 8", "connected LEDs class WS2812LedController(LedController): def __init__(self, color): self._color = color self._pixels = neopixel.NeoPixel(board.D18,", "set(self, id): super(WS2812LedController, self).set(id) self._pixels.fill((0, 0, 0)) self._pixels[id] = self._color self._pixels.show() # 
BASIC", "logging.debug('No operation.') return -1 operation = data['operation'] if 'realEstate' not in operation: logging.debug('No", "if (data['state'] != 'OPERATION'): logging.debug('Not operation state.') return -1 if 'operation' not in", "logging.warn(e) return -1 def init(): logging.info('Starting process.') scheduler = BlockingScheduler() scheduler.add_job(job, 'interval', seconds=5)", "board import neopixel import smbus2 from apscheduler.schedulers.blocking import BlockingScheduler class LedController: def reset(self):", "register = 0x15 bitmask = id % 8 self.bus.write_byte_data(0x20, register, bitmask) # Controller", "= WS2812LedController(color) def job(): address = get_active_address() if address < 0: controller.reset() else:", "except Exception as e: logging.warn('Exception when getting data.') logging.warn(e) return -1 def init():", "0, 0)) self._pixels.show() def set(self, id): super(WS2812LedController, self).set(id) self._pixels.fill((0, 0, 0)) self._pixels[id] =", "color): self._color = color self._pixels = neopixel.NeoPixel(board.D18, 144, auto_write=False) self._pixels.fill((0, 0, 0)) self._pixels.show()", "in realEstate: logging.debug('No folderAddress.') return -1 folderAddress = int(realEstate['folderAddress']) return folderAddress except Exception", "random.randint(-1, 100) def get_active_address(): try: r = requests.get(url, timeout=2) data = r.json() if", "0x00, 0x00) self.bus.write_byte_data(0x20, 0x01, 0x00) def reset(self): super(I2CLedController, self).reset() self.bus.write_byte_data(0x20, 0x14, 0x00) self.bus.write_byte_data(0x20,", "def reset(self): pass def set(self, id): pass class LoggingLedController(LedController): def reset(self): logging.info('Reset') def", "realEstate: logging.debug('No folderAddress.') return -1 folderAddress = int(realEstate['folderAddress']) return folderAddress except Exception as", "realEstate = operation['realEstate'] if 'folderAddress' not in realEstate: logging.debug('No folderAddress.') return -1 
folderAddress", "color self._pixels = neopixel.NeoPixel(board.D18, 144, auto_write=False) self._pixels.fill((0, 0, 0)) self._pixels.show() def reset(self): super(WS2812LedController,", "realEstate.') return -1 realEstate = operation['realEstate'] if 'folderAddress' not in realEstate: logging.debug('No folderAddress.')", "= requests.get(url, timeout=2) data = r.json() if (data['state'] != 'OPERATION'): logging.debug('Not operation state.')", "logging.debug('No realEstate.') return -1 realEstate = operation['realEstate'] if 'folderAddress' not in realEstate: logging.debug('No", "e: logging.warn('Exception when getting data.') logging.warn(e) return -1 def init(): logging.info('Starting process.') scheduler", "= color self._pixels = neopixel.NeoPixel(board.D18, 144, auto_write=False) self._pixels.fill((0, 0, 0)) self._pixels.show() def reset(self):", "self._pixels.fill((0, 0, 0)) self._pixels.show() def reset(self): super(WS2812LedController, self).reset() self._pixels.fill((0, 0, 0)) self._pixels.show() def", "in data: logging.debug('No operation.') return -1 operation = data['operation'] if 'realEstate' not in", "8 self.bus.write_byte_data(0x20, register, bitmask) # Controller for WS2812 connected LEDs class WS2812LedController(LedController): def", "8 > 0: register = 0x15 bitmask = id % 8 self.bus.write_byte_data(0x20, register,", "if address < 0: controller.reset() else: controller.set(address) def get_mock_address(): return random.randint(-1, 100) def", "get_active_address(): try: r = requests.get(url, timeout=2) data = r.json() if (data['state'] != 'OPERATION'):", "LEDs class WS2812LedController(LedController): def __init__(self, color): self._color = color self._pixels = neopixel.NeoPixel(board.D18, 144,", "'interval', seconds=5) try: scheduler.start() except (KeyboardInterrupt): controller.reset() logging.info('Stopping process.') if __name__ == \"__main__\":", "import smbus2 from apscheduler.schedulers.blocking import BlockingScheduler class LedController: 
def reset(self): pass def set(self,", "from apscheduler.schedulers.blocking import BlockingScheduler class LedController: def reset(self): pass def set(self, id): pass", "self._color self._pixels.show() # BASIC OPTIONS logging.basicConfig(level=logging.INFO) TEST_ENV = 'http://192.168.0.199:8080/v1/display' PROD_ENV = 'http://10.24.6.35/api/v1/display' url", "get_active_address() if address < 0: controller.reset() else: controller.set(address) def get_mock_address(): return random.randint(-1, 100)", "0x14, 0x00) self.bus.write_byte_data(0x20, 0x15, 0x00) def set(self, id): super(I2CLedController, self).set(id) register = 0x14", "import random import requests import board import neopixel import smbus2 from apscheduler.schedulers.blocking import", "folderAddress.') return -1 folderAddress = int(realEstate['folderAddress']) return folderAddress except Exception as e: logging.warn('Exception", "reset(self): pass def set(self, id): pass class LoggingLedController(LedController): def reset(self): logging.info('Reset') def set(self,", "self.bus.write_byte_data(0x20, 0x01, 0x00) def reset(self): super(I2CLedController, self).reset() self.bus.write_byte_data(0x20, 0x14, 0x00) self.bus.write_byte_data(0x20, 0x15, 0x00)", "as e: logging.warn('Exception when getting data.') logging.warn(e) return -1 def init(): logging.info('Starting process.')", "0, 255) controller = WS2812LedController(color) def job(): address = get_active_address() if address <", "reset(self): super(I2CLedController, self).reset() self.bus.write_byte_data(0x20, 0x14, 0x00) self.bus.write_byte_data(0x20, 0x15, 0x00) def set(self, id): super(I2CLedController,", "self._pixels.fill((0, 0, 0)) self._pixels.show() def set(self, id): super(WS2812LedController, self).set(id) self._pixels.fill((0, 0, 0)) self._pixels[id]", "-1 if 'operation' not in data: logging.debug('No operation.') return -1 operation = data['operation']", "LoggingLedController(LedController): def reset(self): logging.info('Reset') def set(self, 
id): logging.info('set {}'.format(id)) # Controller for I2C", "for I2C connected LEDs class I2CLedController(LoggingLedController): def __init__(self): self.bus = smbus2.SMBus(1) self.bus.write_byte_data(0x20, 0x00,", "0: register = 0x15 bitmask = id % 8 self.bus.write_byte_data(0x20, register, bitmask) #", "-1 operation = data['operation'] if 'realEstate' not in operation: logging.debug('No realEstate.') return -1", "bitmask) # Controller for WS2812 connected LEDs class WS2812LedController(LedController): def __init__(self, color): self._color", "if id / 8 > 0: register = 0x15 bitmask = id %", "__init__(self): self.bus = smbus2.SMBus(1) self.bus.write_byte_data(0x20, 0x00, 0x00) self.bus.write_byte_data(0x20, 0x01, 0x00) def reset(self): super(I2CLedController,", "try: r = requests.get(url, timeout=2) data = r.json() if (data['state'] != 'OPERATION'): logging.debug('Not", "< 0: controller.reset() else: controller.set(address) def get_mock_address(): return random.randint(-1, 100) def get_active_address(): try:", "self).set(id) self._pixels.fill((0, 0, 0)) self._pixels[id] = self._color self._pixels.show() # BASIC OPTIONS logging.basicConfig(level=logging.INFO) TEST_ENV", "requests import board import neopixel import smbus2 from apscheduler.schedulers.blocking import BlockingScheduler class LedController:", "controller.set(address) def get_mock_address(): return random.randint(-1, 100) def get_active_address(): try: r = requests.get(url, timeout=2)", "'operation' not in data: logging.debug('No operation.') return -1 operation = data['operation'] if 'realEstate'", "auto_write=False) self._pixels.fill((0, 0, 0)) self._pixels.show() def reset(self): super(WS2812LedController, self).reset() self._pixels.fill((0, 0, 0)) self._pixels.show()", "register, bitmask) # Controller for WS2812 connected LEDs class WS2812LedController(LedController): def __init__(self, color):", "import neopixel import smbus2 from apscheduler.schedulers.blocking import BlockingScheduler class 
LedController: def reset(self): pass", "get_mock_address(): return random.randint(-1, 100) def get_active_address(): try: r = requests.get(url, timeout=2) data =", "(0, 0, 255) controller = WS2812LedController(color) def job(): address = get_active_address() if address", "set(self, id): super(I2CLedController, self).set(id) register = 0x14 if id / 8 > 0:", "'OPERATION'): logging.debug('Not operation state.') return -1 if 'operation' not in data: logging.debug('No operation.')", "Controller for WS2812 connected LEDs class WS2812LedController(LedController): def __init__(self, color): self._color = color", "# Controller for WS2812 connected LEDs class WS2812LedController(LedController): def __init__(self, color): self._color =", "class LoggingLedController(LedController): def reset(self): logging.info('Reset') def set(self, id): logging.info('set {}'.format(id)) # Controller for", "self._color = color self._pixels = neopixel.NeoPixel(board.D18, 144, auto_write=False) self._pixels.fill((0, 0, 0)) self._pixels.show() def", "def get_mock_address(): return random.randint(-1, 100) def get_active_address(): try: r = requests.get(url, timeout=2) data", "self._pixels.fill((0, 0, 0)) self._pixels[id] = self._color self._pixels.show() # BASIC OPTIONS logging.basicConfig(level=logging.INFO) TEST_ENV =", "0x00) def reset(self): super(I2CLedController, self).reset() self.bus.write_byte_data(0x20, 0x14, 0x00) self.bus.write_byte_data(0x20, 0x15, 0x00) def set(self,", "super(WS2812LedController, self).set(id) self._pixels.fill((0, 0, 0)) self._pixels[id] = self._color self._pixels.show() # BASIC OPTIONS logging.basicConfig(level=logging.INFO)", "-1 realEstate = operation['realEstate'] if 'folderAddress' not in realEstate: logging.debug('No folderAddress.') return -1", "self._pixels.show() def set(self, id): super(WS2812LedController, self).set(id) self._pixels.fill((0, 0, 0)) self._pixels[id] = self._color self._pixels.show()", "def reset(self): logging.info('Reset') def set(self, 
id): logging.info('set {}'.format(id)) # Controller for I2C connected", "if 'operation' not in data: logging.debug('No operation.') return -1 operation = data['operation'] if", "id): super(I2CLedController, self).set(id) register = 0x14 if id / 8 > 0: register", "= TEST_ENV color = (0, 0, 255) controller = WS2812LedController(color) def job(): address", "set(self, id): pass class LoggingLedController(LedController): def reset(self): logging.info('Reset') def set(self, id): logging.info('set {}'.format(id))", "def set(self, id): super(I2CLedController, self).set(id) register = 0x14 if id / 8 >", "reset(self): logging.info('Reset') def set(self, id): logging.info('set {}'.format(id)) # Controller for I2C connected LEDs", "bitmask = id % 8 self.bus.write_byte_data(0x20, register, bitmask) # Controller for WS2812 connected", "TEST_ENV = 'http://192.168.0.199:8080/v1/display' PROD_ENV = 'http://10.24.6.35/api/v1/display' url = TEST_ENV color = (0, 0,", "I2CLedController(LoggingLedController): def __init__(self): self.bus = smbus2.SMBus(1) self.bus.write_byte_data(0x20, 0x00, 0x00) self.bus.write_byte_data(0x20, 0x01, 0x00) def", "def set(self, id): super(WS2812LedController, self).set(id) self._pixels.fill((0, 0, 0)) self._pixels[id] = self._color self._pixels.show() #", "LEDs class I2CLedController(LoggingLedController): def __init__(self): self.bus = smbus2.SMBus(1) self.bus.write_byte_data(0x20, 0x00, 0x00) self.bus.write_byte_data(0x20, 0x01,", "return -1 folderAddress = int(realEstate['folderAddress']) return folderAddress except Exception as e: logging.warn('Exception when", "smbus2 from apscheduler.schedulers.blocking import BlockingScheduler class LedController: def reset(self): pass def set(self, id):", "= 0x15 bitmask = id % 8 self.bus.write_byte_data(0x20, register, bitmask) # Controller for", "url = TEST_ENV color = (0, 0, 255) controller = WS2812LedController(color) def job():", "= get_active_address() if address < 0: controller.reset() else: 
controller.set(address) def get_mock_address(): return random.randint(-1,", "seconds=5) try: scheduler.start() except (KeyboardInterrupt): controller.reset() logging.info('Stopping process.') if __name__ == \"__main__\": init()", "else: controller.set(address) def get_mock_address(): return random.randint(-1, 100) def get_active_address(): try: r = requests.get(url,", "'http://192.168.0.199:8080/v1/display' PROD_ENV = 'http://10.24.6.35/api/v1/display' url = TEST_ENV color = (0, 0, 255) controller", "import requests import board import neopixel import smbus2 from apscheduler.schedulers.blocking import BlockingScheduler class", "0x15 bitmask = id % 8 self.bus.write_byte_data(0x20, register, bitmask) # Controller for WS2812", "def __init__(self): self.bus = smbus2.SMBus(1) self.bus.write_byte_data(0x20, 0x00, 0x00) self.bus.write_byte_data(0x20, 0x01, 0x00) def reset(self):", "0: controller.reset() else: controller.set(address) def get_mock_address(): return random.randint(-1, 100) def get_active_address(): try: r", "def job(): address = get_active_address() if address < 0: controller.reset() else: controller.set(address) def", "address = get_active_address() if address < 0: controller.reset() else: controller.set(address) def get_mock_address(): return", "logging.basicConfig(level=logging.INFO) TEST_ENV = 'http://192.168.0.199:8080/v1/display' PROD_ENV = 'http://10.24.6.35/api/v1/display' url = TEST_ENV color = (0,", "logging.info('Reset') def set(self, id): logging.info('set {}'.format(id)) # Controller for I2C connected LEDs class", "operation: logging.debug('No realEstate.') return -1 realEstate = operation['realEstate'] if 'folderAddress' not in realEstate:", "BlockingScheduler() scheduler.add_job(job, 'interval', seconds=5) try: scheduler.start() except (KeyboardInterrupt): controller.reset() logging.info('Stopping process.') if __name__", "= neopixel.NeoPixel(board.D18, 144, auto_write=False) self._pixels.fill((0, 0, 0)) self._pixels.show() def reset(self): 
super(WS2812LedController, self).reset() self._pixels.fill((0,", "controller = WS2812LedController(color) def job(): address = get_active_address() if address < 0: controller.reset()", "data = r.json() if (data['state'] != 'OPERATION'): logging.debug('Not operation state.') return -1 if", "self._pixels.show() def reset(self): super(WS2812LedController, self).reset() self._pixels.fill((0, 0, 0)) self._pixels.show() def set(self, id): super(WS2812LedController,", "address < 0: controller.reset() else: controller.set(address) def get_mock_address(): return random.randint(-1, 100) def get_active_address():", "data.') logging.warn(e) return -1 def init(): logging.info('Starting process.') scheduler = BlockingScheduler() scheduler.add_job(job, 'interval',", "0x15, 0x00) def set(self, id): super(I2CLedController, self).set(id) register = 0x14 if id /", "0)) self._pixels.show() def reset(self): super(WS2812LedController, self).reset() self._pixels.fill((0, 0, 0)) self._pixels.show() def set(self, id):", "= operation['realEstate'] if 'folderAddress' not in realEstate: logging.debug('No folderAddress.') return -1 folderAddress =", "= 'http://10.24.6.35/api/v1/display' url = TEST_ENV color = (0, 0, 255) controller = WS2812LedController(color)", "self.bus.write_byte_data(0x20, 0x00, 0x00) self.bus.write_byte_data(0x20, 0x01, 0x00) def reset(self): super(I2CLedController, self).reset() self.bus.write_byte_data(0x20, 0x14, 0x00)", "# Controller for I2C connected LEDs class I2CLedController(LoggingLedController): def __init__(self): self.bus = smbus2.SMBus(1)", "self._pixels[id] = self._color self._pixels.show() # BASIC OPTIONS logging.basicConfig(level=logging.INFO) TEST_ENV = 'http://192.168.0.199:8080/v1/display' PROD_ENV =", "id): logging.info('set {}'.format(id)) # Controller for I2C connected LEDs class I2CLedController(LoggingLedController): def __init__(self):", "reset(self): super(WS2812LedController, self).reset() self._pixels.fill((0, 0, 0)) self._pixels.show() def 
set(self, id): super(WS2812LedController, self).set(id) self._pixels.fill((0,", "super(I2CLedController, self).reset() self.bus.write_byte_data(0x20, 0x14, 0x00) self.bus.write_byte_data(0x20, 0x15, 0x00) def set(self, id): super(I2CLedController, self).set(id)", "/ 8 > 0: register = 0x15 bitmask = id % 8 self.bus.write_byte_data(0x20,", "operation = data['operation'] if 'realEstate' not in operation: logging.debug('No realEstate.') return -1 realEstate", "super(I2CLedController, self).set(id) register = 0x14 if id / 8 > 0: register =", "LedController: def reset(self): pass def set(self, id): pass class LoggingLedController(LedController): def reset(self): logging.info('Reset')", "when getting data.') logging.warn(e) return -1 def init(): logging.info('Starting process.') scheduler = BlockingScheduler()", "def get_active_address(): try: r = requests.get(url, timeout=2) data = r.json() if (data['state'] !=", "import logging import random import requests import board import neopixel import smbus2 from", "= id % 8 self.bus.write_byte_data(0x20, register, bitmask) # Controller for WS2812 connected LEDs", "0x00) self.bus.write_byte_data(0x20, 0x01, 0x00) def reset(self): super(I2CLedController, self).reset() self.bus.write_byte_data(0x20, 0x14, 0x00) self.bus.write_byte_data(0x20, 0x15,", "__init__(self, color): self._color = color self._pixels = neopixel.NeoPixel(board.D18, 144, auto_write=False) self._pixels.fill((0, 0, 0))", "0)) self._pixels.show() def set(self, id): super(WS2812LedController, self).set(id) self._pixels.fill((0, 0, 0)) self._pixels[id] = self._color", "apscheduler.schedulers.blocking import BlockingScheduler class LedController: def reset(self): pass def set(self, id): pass class", "not in operation: logging.debug('No realEstate.') return -1 realEstate = operation['realEstate'] if 'folderAddress' not", "self.bus.write_byte_data(0x20, register, bitmask) # Controller for WS2812 connected LEDs class WS2812LedController(LedController): def 
__init__(self,", "operation['realEstate'] if 'folderAddress' not in realEstate: logging.debug('No folderAddress.') return -1 folderAddress = int(realEstate['folderAddress'])", "-1 folderAddress = int(realEstate['folderAddress']) return folderAddress except Exception as e: logging.warn('Exception when getting", "0)) self._pixels[id] = self._color self._pixels.show() # BASIC OPTIONS logging.basicConfig(level=logging.INFO) TEST_ENV = 'http://192.168.0.199:8080/v1/display' PROD_ENV", "!= 'OPERATION'): logging.debug('Not operation state.') return -1 if 'operation' not in data: logging.debug('No", "for WS2812 connected LEDs class WS2812LedController(LedController): def __init__(self, color): self._color = color self._pixels", "logging.info('set {}'.format(id)) # Controller for I2C connected LEDs class I2CLedController(LoggingLedController): def __init__(self): self.bus", "self.bus.write_byte_data(0x20, 0x15, 0x00) def set(self, id): super(I2CLedController, self).set(id) register = 0x14 if id", "set(self, id): logging.info('set {}'.format(id)) # Controller for I2C connected LEDs class I2CLedController(LoggingLedController): def", "0, 0)) self._pixels[id] = self._color self._pixels.show() # BASIC OPTIONS logging.basicConfig(level=logging.INFO) TEST_ENV = 'http://192.168.0.199:8080/v1/display'", "in operation: logging.debug('No realEstate.') return -1 realEstate = operation['realEstate'] if 'folderAddress' not in", "Exception as e: logging.warn('Exception when getting data.') logging.warn(e) return -1 def init(): logging.info('Starting", "logging.warn('Exception when getting data.') logging.warn(e) return -1 def init(): logging.info('Starting process.') scheduler =", "return -1 def init(): logging.info('Starting process.') scheduler = BlockingScheduler() scheduler.add_job(job, 'interval', seconds=5) try:", "if 'realEstate' not in operation: logging.debug('No realEstate.') return -1 realEstate = operation['realEstate'] if", "scheduler.add_job(job, 'interval', seconds=5) try: 
scheduler.start() except (KeyboardInterrupt): controller.reset() logging.info('Stopping process.') if __name__ ==", "0x00) self.bus.write_byte_data(0x20, 0x15, 0x00) def set(self, id): super(I2CLedController, self).set(id) register = 0x14 if", "random import requests import board import neopixel import smbus2 from apscheduler.schedulers.blocking import BlockingScheduler", "self).reset() self.bus.write_byte_data(0x20, 0x14, 0x00) self.bus.write_byte_data(0x20, 0x15, 0x00) def set(self, id): super(I2CLedController, self).set(id) register", "def __init__(self, color): self._color = color self._pixels = neopixel.NeoPixel(board.D18, 144, auto_write=False) self._pixels.fill((0, 0,", "= int(realEstate['folderAddress']) return folderAddress except Exception as e: logging.warn('Exception when getting data.') logging.warn(e)", "return folderAddress except Exception as e: logging.warn('Exception when getting data.') logging.warn(e) return -1", "neopixel import smbus2 from apscheduler.schedulers.blocking import BlockingScheduler class LedController: def reset(self): pass def", "getting data.') logging.warn(e) return -1 def init(): logging.info('Starting process.') scheduler = BlockingScheduler() scheduler.add_job(job,", "return -1 realEstate = operation['realEstate'] if 'folderAddress' not in realEstate: logging.debug('No folderAddress.') return", "return random.randint(-1, 100) def get_active_address(): try: r = requests.get(url, timeout=2) data = r.json()", "255) controller = WS2812LedController(color) def job(): address = get_active_address() if address < 0:", "state.') return -1 if 'operation' not in data: logging.debug('No operation.') return -1 operation", "operation.') return -1 operation = data['operation'] if 'realEstate' not in operation: logging.debug('No realEstate.')", "int(realEstate['folderAddress']) return folderAddress except Exception as e: logging.warn('Exception when getting data.') logging.warn(e) return", "color = (0, 0, 255) controller = 
WS2812LedController(color) def job(): address = get_active_address()", "return -1 if 'operation' not in data: logging.debug('No operation.') return -1 operation =", "-1 def init(): logging.info('Starting process.') scheduler = BlockingScheduler() scheduler.add_job(job, 'interval', seconds=5) try: scheduler.start()", "requests.get(url, timeout=2) data = r.json() if (data['state'] != 'OPERATION'): logging.debug('Not operation state.') return", "class I2CLedController(LoggingLedController): def __init__(self): self.bus = smbus2.SMBus(1) self.bus.write_byte_data(0x20, 0x00, 0x00) self.bus.write_byte_data(0x20, 0x01, 0x00)", "self).set(id) register = 0x14 if id / 8 > 0: register = 0x15", "WS2812LedController(LedController): def __init__(self, color): self._color = color self._pixels = neopixel.NeoPixel(board.D18, 144, auto_write=False) self._pixels.fill((0,", "self._pixels = neopixel.NeoPixel(board.D18, 144, auto_write=False) self._pixels.fill((0, 0, 0)) self._pixels.show() def reset(self): super(WS2812LedController, self).reset()", "register = 0x14 if id / 8 > 0: register = 0x15 bitmask", "timeout=2) data = r.json() if (data['state'] != 'OPERATION'): logging.debug('Not operation state.') return -1", "return -1 operation = data['operation'] if 'realEstate' not in operation: logging.debug('No realEstate.') return", "Controller for I2C connected LEDs class I2CLedController(LoggingLedController): def __init__(self): self.bus = smbus2.SMBus(1) self.bus.write_byte_data(0x20,", "self.bus.write_byte_data(0x20, 0x14, 0x00) self.bus.write_byte_data(0x20, 0x15, 0x00) def set(self, id): super(I2CLedController, self).set(id) register =", "self._pixels.show() # BASIC OPTIONS logging.basicConfig(level=logging.INFO) TEST_ENV = 'http://192.168.0.199:8080/v1/display' PROD_ENV = 'http://10.24.6.35/api/v1/display' url =" ]
[ "td_sigi = read('Sig1000_IMU.ad2cp', userdata=False) td_sigi_ud = read('Sig1000_IMU.ad2cp') td_awac = read('AWAC_test01.wpr', userdata=False) td_awac_ud =", "!CLEANUP! # BUG that's loading different data??! td_sigi.pop('sys.temp_mag') dat_sigi_tmp = dat_sigi.copy() dat_sigi_tmp.pop('sys.temp_mag') td_sigi_ud.pop('sys.temp_mag')", "dat1, dat2, msg in [ (td_rdi, dat_rdi, msg('RDI_test01.000')), (td_sig, dat_sig, msg('BenchFile01.ad2cp')), (td_sigi, dat_sigi_tmp,", "read('Sig1000_IMU.ad2cp') td_awac = read('AWAC_test01.wpr', userdata=False) td_awac_ud = read('AWAC_test01.wpr') td_wr1 = read('winriver01.PD0') td_wr2 =", "dat_awac_ud, msg('AWAC_test01.wpr+userdata')), (td_wr1, dat_wr1, msg('winriver01.PD0')), (td_wr2, dat_wr2, msg('winriver02.PD0')), ]: yield tb.data_equiv, dat1, dat2,", "yield tb.data_equiv, dat1, dat2, msg if __name__ == '__main__': for func, dat1, dat2,", "from dolfyn.main import read_example as read import dolfyn.test.base as tb import sys load", "read('winriver01.PD0') td_wr2 = read('winriver02.PD0') if make_data: save(td_rdi, 'RDI_test01.h5') save(td_sig, 'BenchFile01.h5') save(td_sigi, 'Sig1000_IMU.h5') save(td_sigi_ud,", "dat_sig, msg('BenchFile01.ad2cp')), (td_sigi, dat_sigi_tmp, msg('Sig1000_IMU.ad2cp')), (td_sigi_ud, dat_sigi_ud_tmp, msg('Sig1000_IMU_ud.ad2cp')), (td_awac, dat_awac, msg('AWAC_test01.wpr')), (td_awac_ud, dat_awac_ud,", "import read_example as read import dolfyn.test.base as tb import sys load = tb.load_tdata", "import sys load = tb.load_tdata save = tb.save_tdata dat_rdi = load('RDI_test01.h5') dat_rdi_i =", "dat2, msg if __name__ == '__main__': for func, dat1, dat2, msg in test_read():", "= load('RDI_test01_rotate_beam2inst.h5') dat_awac = load('AWAC_test01.h5') dat_awac_ud = load('AWAC_test01_ud.h5') dat_sig = load('BenchFile01.h5') dat_sigi =", "td_sigi_ud.pop('sys.temp_mag') dat_sigi_ud_tmp = dat_sigi_ud.copy() dat_sigi_ud_tmp.pop('sys.temp_mag') else: dat_sigi_tmp = dat_sigi dat_sigi_ud_tmp = dat_sigi_ud def", "= 
read('Sig1000_IMU.ad2cp', userdata=False) td_sigi_ud = read('Sig1000_IMU.ad2cp') td_awac = read('AWAC_test01.wpr', userdata=False) td_awac_ud = read('AWAC_test01.wpr')", ".format(infile, testfile)) for dat1, dat2, msg in [ (td_rdi, dat_rdi, msg('RDI_test01.000')), (td_sig, dat_sig,", "This uses the built-in declination! td_sig = read('BenchFile01.ad2cp') td_sigi = read('Sig1000_IMU.ad2cp', userdata=False) td_sigi_ud", "dat_wr2, msg('winriver02.PD0')), ]: yield tb.data_equiv, dat1, dat2, msg if __name__ == '__main__': for", "read('RDI_test01.000') # This uses the built-in declination! td_sig = read('BenchFile01.ad2cp') td_sigi = read('Sig1000_IMU.ad2cp',", "dat_awac = load('AWAC_test01.h5') dat_awac_ud = load('AWAC_test01_ud.h5') dat_sig = load('BenchFile01.h5') dat_sigi = load('Sig1000_IMU.h5') dat_sigi_ud", "save(td_awac_ud, 'AWAC_test01_ud.h5') save(td_wr1, 'winriver01.h5') save(td_wr2, 'winriver02.h5') return if sys.version_info.major == 2: # This", "msg('RDI_test01.000')), (td_sig, dat_sig, msg('BenchFile01.ad2cp')), (td_sigi, dat_sigi_tmp, msg('Sig1000_IMU.ad2cp')), (td_sigi_ud, dat_sigi_ud_tmp, msg('Sig1000_IMU_ud.ad2cp')), (td_awac, dat_awac, msg('AWAC_test01.wpr')),", "td_sig = read('BenchFile01.ad2cp') td_sigi = read('Sig1000_IMU.ad2cp', userdata=False) td_sigi_ud = read('Sig1000_IMU.ad2cp') td_awac = read('AWAC_test01.wpr',", "= load('AWAC_test01_ud.h5') dat_sig = load('BenchFile01.h5') dat_sigi = load('Sig1000_IMU.h5') dat_sigi_ud = load('Sig1000_IMU_ud.h5') dat_wr1 =", "dat_sigi_ud_tmp, msg('Sig1000_IMU_ud.ad2cp')), (td_awac, dat_awac, msg('AWAC_test01.wpr')), (td_awac_ud, dat_awac_ud, msg('AWAC_test01.wpr+userdata')), (td_wr1, dat_wr1, msg('winriver01.PD0')), (td_wr2, dat_wr2,", "= load('BenchFile01.h5') dat_sigi = load('Sig1000_IMU.h5') dat_sigi_ud = load('Sig1000_IMU_ud.h5') dat_wr1 = load('winriver01.h5') dat_wr2 =", "This is a HACK for Py2 # for some reason a very small", "'AWAC_test01.h5') save(td_awac_ud, 'AWAC_test01_ud.h5') save(td_wr1, 
'winriver01.h5') save(td_wr2, 'winriver02.h5') return if sys.version_info.major == 2: #", "'winriver02.h5') return if sys.version_info.major == 2: # This is a HACK for Py2", "read('BenchFile01.ad2cp') td_sigi = read('Sig1000_IMU.ad2cp', userdata=False) td_sigi_ud = read('Sig1000_IMU.ad2cp') td_awac = read('AWAC_test01.wpr', userdata=False) td_awac_ud", "= infile.split('.')[0] + '.h5' return (\"The output of read('{}') does not match '{}'.\"", "'RDI_test01.h5') save(td_sig, 'BenchFile01.h5') save(td_sigi, 'Sig1000_IMU.h5') save(td_sigi_ud, 'Sig1000_IMU_ud.h5') save(td_awac, 'AWAC_test01.h5') save(td_awac_ud, 'AWAC_test01_ud.h5') save(td_wr1, 'winriver01.h5')", "save(td_awac, 'AWAC_test01.h5') save(td_awac_ud, 'AWAC_test01_ud.h5') save(td_wr1, 'winriver01.h5') save(td_wr2, 'winriver02.h5') return if sys.version_info.major == 2:", "dat_sigi_tmp.pop('sys.temp_mag') td_sigi_ud.pop('sys.temp_mag') dat_sigi_ud_tmp = dat_sigi_ud.copy() dat_sigi_ud_tmp.pop('sys.temp_mag') else: dat_sigi_tmp = dat_sigi dat_sigi_ud_tmp = dat_sigi_ud", "tb.data_equiv, dat1, dat2, msg if __name__ == '__main__': for func, dat1, dat2, msg", "loading different data??! td_sigi.pop('sys.temp_mag') dat_sigi_tmp = dat_sigi.copy() dat_sigi_tmp.pop('sys.temp_mag') td_sigi_ud.pop('sys.temp_mag') dat_sigi_ud_tmp = dat_sigi_ud.copy() dat_sigi_ud_tmp.pop('sys.temp_mag')", "# !CLEANUP! # BUG that's loading different data??! td_sigi.pop('sys.temp_mag') dat_sigi_tmp = dat_sigi.copy() dat_sigi_tmp.pop('sys.temp_mag')", "load('Sig1000_IMU.h5') dat_sigi_ud = load('Sig1000_IMU_ud.h5') dat_wr1 = load('winriver01.h5') dat_wr2 = load('winriver02.h5') def test_read(make_data=False): td_rdi", "built-in declination! 
td_sig = read('BenchFile01.ad2cp') td_sigi = read('Sig1000_IMU.ad2cp', userdata=False) td_sigi_ud = read('Sig1000_IMU.ad2cp') td_awac", "'Sig1000_IMU.h5') save(td_sigi_ud, 'Sig1000_IMU_ud.h5') save(td_awac, 'AWAC_test01.h5') save(td_awac_ud, 'AWAC_test01_ud.h5') save(td_wr1, 'winriver01.h5') save(td_wr2, 'winriver02.h5') return if", "load('winriver01.h5') dat_wr2 = load('winriver02.h5') def test_read(make_data=False): td_rdi = read('RDI_test01.000') # This uses the", "td_awac_ud = read('AWAC_test01.wpr') td_wr1 = read('winriver01.PD0') td_wr2 = read('winriver02.PD0') if make_data: save(td_rdi, 'RDI_test01.h5')", "the same for py2? # !CLEANUP! # BUG that's loading different data??! td_sigi.pop('sys.temp_mag')", "td_sigi_ud = read('Sig1000_IMU.ad2cp') td_awac = read('AWAC_test01.wpr', userdata=False) td_awac_ud = read('AWAC_test01.wpr') td_wr1 = read('winriver01.PD0')", "same for py2? # !CLEANUP! # BUG that's loading different data??! td_sigi.pop('sys.temp_mag') dat_sigi_tmp", "for py2? # !CLEANUP! # BUG that's loading different data??! td_sigi.pop('sys.temp_mag') dat_sigi_tmp =", "td_sigi.pop('sys.temp_mag') dat_sigi_tmp = dat_sigi.copy() dat_sigi_tmp.pop('sys.temp_mag') td_sigi_ud.pop('sys.temp_mag') dat_sigi_ud_tmp = dat_sigi_ud.copy() dat_sigi_ud_tmp.pop('sys.temp_mag') else: dat_sigi_tmp =", "save(td_wr2, 'winriver02.h5') return if sys.version_info.major == 2: # This is a HACK for", "'.h5' return (\"The output of read('{}') does not match '{}'.\" .format(infile, testfile)) for", "load('AWAC_test01_ud.h5') dat_sig = load('BenchFile01.h5') dat_sigi = load('Sig1000_IMU.h5') dat_sigi_ud = load('Sig1000_IMU_ud.h5') dat_wr1 = load('winriver01.h5')", "HACK for Py2 # for some reason a very small numer of the", "dolfyn.test.base as tb import sys load = tb.load_tdata save = tb.save_tdata dat_rdi =", "= load('winriver02.h5') def test_read(make_data=False): td_rdi = read('RDI_test01.000') # This uses the built-in declination!", "declination! 
td_sig = read('BenchFile01.ad2cp') td_sigi = read('Sig1000_IMU.ad2cp', userdata=False) td_sigi_ud = read('Sig1000_IMU.ad2cp') td_awac =", "not match '{}'.\" .format(infile, testfile)) for dat1, dat2, msg in [ (td_rdi, dat_rdi,", "dat_sigi_tmp, msg('Sig1000_IMU.ad2cp')), (td_sigi_ud, dat_sigi_ud_tmp, msg('Sig1000_IMU_ud.ad2cp')), (td_awac, dat_awac, msg('AWAC_test01.wpr')), (td_awac_ud, dat_awac_ud, msg('AWAC_test01.wpr+userdata')), (td_wr1, dat_wr1,", "tb.save_tdata dat_rdi = load('RDI_test01.h5') dat_rdi_i = load('RDI_test01_rotate_beam2inst.h5') dat_awac = load('AWAC_test01.h5') dat_awac_ud = load('AWAC_test01_ud.h5')", "__name__ == '__main__': for func, dat1, dat2, msg in test_read(): func(dat1, dat2, msg)", "for some reason a very small numer of the values in temp_mag #", "dat_sigi_ud_tmp = dat_sigi_ud.copy() dat_sigi_ud_tmp.pop('sys.temp_mag') else: dat_sigi_tmp = dat_sigi dat_sigi_ud_tmp = dat_sigi_ud def msg(infile):", "for dat1, dat2, msg in [ (td_rdi, dat_rdi, msg('RDI_test01.000')), (td_sig, dat_sig, msg('BenchFile01.ad2cp')), (td_sigi,", "[ (td_rdi, dat_rdi, msg('RDI_test01.000')), (td_sig, dat_sig, msg('BenchFile01.ad2cp')), (td_sigi, dat_sigi_tmp, msg('Sig1000_IMU.ad2cp')), (td_sigi_ud, dat_sigi_ud_tmp, msg('Sig1000_IMU_ud.ad2cp')),", "save(td_wr1, 'winriver01.h5') save(td_wr2, 'winriver02.h5') return if sys.version_info.major == 2: # This is a", "dolfyn.main import read_example as read import dolfyn.test.base as tb import sys load =", "td_rdi = read('RDI_test01.000') # This uses the built-in declination! 
td_sig = read('BenchFile01.ad2cp') td_sigi", "if sys.version_info.major == 2: # This is a HACK for Py2 # for", "(td_sigi, dat_sigi_tmp, msg('Sig1000_IMU.ad2cp')), (td_sigi_ud, dat_sigi_ud_tmp, msg('Sig1000_IMU_ud.ad2cp')), (td_awac, dat_awac, msg('AWAC_test01.wpr')), (td_awac_ud, dat_awac_ud, msg('AWAC_test01.wpr+userdata')), (td_wr1,", "(td_awac, dat_awac, msg('AWAC_test01.wpr')), (td_awac_ud, dat_awac_ud, msg('AWAC_test01.wpr+userdata')), (td_wr1, dat_wr1, msg('winriver01.PD0')), (td_wr2, dat_wr2, msg('winriver02.PD0')), ]:", "dat_wr2 = load('winriver02.h5') def test_read(make_data=False): td_rdi = read('RDI_test01.000') # This uses the built-in", "(td_awac_ud, dat_awac_ud, msg('AWAC_test01.wpr+userdata')), (td_wr1, dat_wr1, msg('winriver01.PD0')), (td_wr2, dat_wr2, msg('winriver02.PD0')), ]: yield tb.data_equiv, dat1,", "save(td_sig, 'BenchFile01.h5') save(td_sigi, 'Sig1000_IMU.h5') save(td_sigi_ud, 'Sig1000_IMU_ud.h5') save(td_awac, 'AWAC_test01.h5') save(td_awac_ud, 'AWAC_test01_ud.h5') save(td_wr1, 'winriver01.h5') save(td_wr2,", "dat_sigi_ud_tmp = dat_sigi_ud def msg(infile): testfile = infile.split('.')[0] + '.h5' return (\"The output", "the values in temp_mag # are not the same for py2? # !CLEANUP!", "if __name__ == '__main__': for func, dat1, dat2, msg in test_read(): func(dat1, dat2,", "# for some reason a very small numer of the values in temp_mag", "dat_sigi_tmp = dat_sigi dat_sigi_ud_tmp = dat_sigi_ud def msg(infile): testfile = infile.split('.')[0] + '.h5'", "== 2: # This is a HACK for Py2 # for some reason", "save(td_rdi, 'RDI_test01.h5') save(td_sig, 'BenchFile01.h5') save(td_sigi, 'Sig1000_IMU.h5') save(td_sigi_ud, 'Sig1000_IMU_ud.h5') save(td_awac, 'AWAC_test01.h5') save(td_awac_ud, 'AWAC_test01_ud.h5') save(td_wr1,", "def test_read(make_data=False): td_rdi = read('RDI_test01.000') # This uses the built-in declination! 
td_sig =", "<reponame>aidanbharath/dolfyn from dolfyn.main import read_example as read import dolfyn.test.base as tb import sys", "sys load = tb.load_tdata save = tb.save_tdata dat_rdi = load('RDI_test01.h5') dat_rdi_i = load('RDI_test01_rotate_beam2inst.h5')", "'BenchFile01.h5') save(td_sigi, 'Sig1000_IMU.h5') save(td_sigi_ud, 'Sig1000_IMU_ud.h5') save(td_awac, 'AWAC_test01.h5') save(td_awac_ud, 'AWAC_test01_ud.h5') save(td_wr1, 'winriver01.h5') save(td_wr2, 'winriver02.h5')", "dat_sigi dat_sigi_ud_tmp = dat_sigi_ud def msg(infile): testfile = infile.split('.')[0] + '.h5' return (\"The", "msg('BenchFile01.ad2cp')), (td_sigi, dat_sigi_tmp, msg('Sig1000_IMU.ad2cp')), (td_sigi_ud, dat_sigi_ud_tmp, msg('Sig1000_IMU_ud.ad2cp')), (td_awac, dat_awac, msg('AWAC_test01.wpr')), (td_awac_ud, dat_awac_ud, msg('AWAC_test01.wpr+userdata')),", "some reason a very small numer of the values in temp_mag # are", "dat2, msg in [ (td_rdi, dat_rdi, msg('RDI_test01.000')), (td_sig, dat_sig, msg('BenchFile01.ad2cp')), (td_sigi, dat_sigi_tmp, msg('Sig1000_IMU.ad2cp')),", "dat_sig = load('BenchFile01.h5') dat_sigi = load('Sig1000_IMU.h5') dat_sigi_ud = load('Sig1000_IMU_ud.h5') dat_wr1 = load('winriver01.h5') dat_wr2", "= read('winriver02.PD0') if make_data: save(td_rdi, 'RDI_test01.h5') save(td_sig, 'BenchFile01.h5') save(td_sigi, 'Sig1000_IMU.h5') save(td_sigi_ud, 'Sig1000_IMU_ud.h5') save(td_awac,", "for Py2 # for some reason a very small numer of the values", "of the values in temp_mag # are not the same for py2? 
#", "msg('winriver01.PD0')), (td_wr2, dat_wr2, msg('winriver02.PD0')), ]: yield tb.data_equiv, dat1, dat2, msg if __name__ ==", "sys.version_info.major == 2: # This is a HACK for Py2 # for some", "'Sig1000_IMU_ud.h5') save(td_awac, 'AWAC_test01.h5') save(td_awac_ud, 'AWAC_test01_ud.h5') save(td_wr1, 'winriver01.h5') save(td_wr2, 'winriver02.h5') return if sys.version_info.major ==", "dat1, dat2, msg if __name__ == '__main__': for func, dat1, dat2, msg in", "]: yield tb.data_equiv, dat1, dat2, msg if __name__ == '__main__': for func, dat1,", "dat_sigi_tmp = dat_sigi.copy() dat_sigi_tmp.pop('sys.temp_mag') td_sigi_ud.pop('sys.temp_mag') dat_sigi_ud_tmp = dat_sigi_ud.copy() dat_sigi_ud_tmp.pop('sys.temp_mag') else: dat_sigi_tmp = dat_sigi", "make_data: save(td_rdi, 'RDI_test01.h5') save(td_sig, 'BenchFile01.h5') save(td_sigi, 'Sig1000_IMU.h5') save(td_sigi_ud, 'Sig1000_IMU_ud.h5') save(td_awac, 'AWAC_test01.h5') save(td_awac_ud, 'AWAC_test01_ud.h5')", "= load('winriver01.h5') dat_wr2 = load('winriver02.h5') def test_read(make_data=False): td_rdi = read('RDI_test01.000') # This uses", "read import dolfyn.test.base as tb import sys load = tb.load_tdata save = tb.save_tdata", "import dolfyn.test.base as tb import sys load = tb.load_tdata save = tb.save_tdata dat_rdi", "load('RDI_test01.h5') dat_rdi_i = load('RDI_test01_rotate_beam2inst.h5') dat_awac = load('AWAC_test01.h5') dat_awac_ud = load('AWAC_test01_ud.h5') dat_sig = load('BenchFile01.h5')", "(td_sigi_ud, dat_sigi_ud_tmp, msg('Sig1000_IMU_ud.ad2cp')), (td_awac, dat_awac, msg('AWAC_test01.wpr')), (td_awac_ud, dat_awac_ud, msg('AWAC_test01.wpr+userdata')), (td_wr1, dat_wr1, msg('winriver01.PD0')), (td_wr2,", "dat_sigi_ud = load('Sig1000_IMU_ud.h5') dat_wr1 = load('winriver01.h5') dat_wr2 = load('winriver02.h5') def test_read(make_data=False): td_rdi =", "save(td_sigi_ud, 'Sig1000_IMU_ud.h5') save(td_awac, 'AWAC_test01.h5') save(td_awac_ud, 'AWAC_test01_ud.h5') save(td_wr1, 'winriver01.h5') save(td_wr2, 
'winriver02.h5') return if sys.version_info.major", "= load('Sig1000_IMU_ud.h5') dat_wr1 = load('winriver01.h5') dat_wr2 = load('winriver02.h5') def test_read(make_data=False): td_rdi = read('RDI_test01.000')", "def msg(infile): testfile = infile.split('.')[0] + '.h5' return (\"The output of read('{}') does", "= tb.save_tdata dat_rdi = load('RDI_test01.h5') dat_rdi_i = load('RDI_test01_rotate_beam2inst.h5') dat_awac = load('AWAC_test01.h5') dat_awac_ud =", "= load('RDI_test01.h5') dat_rdi_i = load('RDI_test01_rotate_beam2inst.h5') dat_awac = load('AWAC_test01.h5') dat_awac_ud = load('AWAC_test01_ud.h5') dat_sig =", "# This uses the built-in declination! td_sig = read('BenchFile01.ad2cp') td_sigi = read('Sig1000_IMU.ad2cp', userdata=False)", "not the same for py2? # !CLEANUP! # BUG that's loading different data??!", "read('AWAC_test01.wpr', userdata=False) td_awac_ud = read('AWAC_test01.wpr') td_wr1 = read('winriver01.PD0') td_wr2 = read('winriver02.PD0') if make_data:", "= dat_sigi dat_sigi_ud_tmp = dat_sigi_ud def msg(infile): testfile = infile.split('.')[0] + '.h5' return", "msg in [ (td_rdi, dat_rdi, msg('RDI_test01.000')), (td_sig, dat_sig, msg('BenchFile01.ad2cp')), (td_sigi, dat_sigi_tmp, msg('Sig1000_IMU.ad2cp')), (td_sigi_ud,", "msg('winriver02.PD0')), ]: yield tb.data_equiv, dat1, dat2, msg if __name__ == '__main__': for func,", "read('winriver02.PD0') if make_data: save(td_rdi, 'RDI_test01.h5') save(td_sig, 'BenchFile01.h5') save(td_sigi, 'Sig1000_IMU.h5') save(td_sigi_ud, 'Sig1000_IMU_ud.h5') save(td_awac, 'AWAC_test01.h5')", "tb import sys load = tb.load_tdata save = tb.save_tdata dat_rdi = load('RDI_test01.h5') dat_rdi_i", "BUG that's loading different data??! td_sigi.pop('sys.temp_mag') dat_sigi_tmp = dat_sigi.copy() dat_sigi_tmp.pop('sys.temp_mag') td_sigi_ud.pop('sys.temp_mag') dat_sigi_ud_tmp =", "that's loading different data??! 
td_sigi.pop('sys.temp_mag') dat_sigi_tmp = dat_sigi.copy() dat_sigi_tmp.pop('sys.temp_mag') td_sigi_ud.pop('sys.temp_mag') dat_sigi_ud_tmp = dat_sigi_ud.copy()", "2: # This is a HACK for Py2 # for some reason a", "= read('AWAC_test01.wpr', userdata=False) td_awac_ud = read('AWAC_test01.wpr') td_wr1 = read('winriver01.PD0') td_wr2 = read('winriver02.PD0') if", "msg if __name__ == '__main__': for func, dat1, dat2, msg in test_read(): func(dat1,", "tb.load_tdata save = tb.save_tdata dat_rdi = load('RDI_test01.h5') dat_rdi_i = load('RDI_test01_rotate_beam2inst.h5') dat_awac = load('AWAC_test01.h5')", "a HACK for Py2 # for some reason a very small numer of", "small numer of the values in temp_mag # are not the same for", "py2? # !CLEANUP! # BUG that's loading different data??! td_sigi.pop('sys.temp_mag') dat_sigi_tmp = dat_sigi.copy()", "dat_rdi_i = load('RDI_test01_rotate_beam2inst.h5') dat_awac = load('AWAC_test01.h5') dat_awac_ud = load('AWAC_test01_ud.h5') dat_sig = load('BenchFile01.h5') dat_sigi", "output of read('{}') does not match '{}'.\" .format(infile, testfile)) for dat1, dat2, msg", "'{}'.\" .format(infile, testfile)) for dat1, dat2, msg in [ (td_rdi, dat_rdi, msg('RDI_test01.000')), (td_sig,", "+ '.h5' return (\"The output of read('{}') does not match '{}'.\" .format(infile, testfile))", "dat_sigi_ud.copy() dat_sigi_ud_tmp.pop('sys.temp_mag') else: dat_sigi_tmp = dat_sigi dat_sigi_ud_tmp = dat_sigi_ud def msg(infile): testfile =", "userdata=False) td_sigi_ud = read('Sig1000_IMU.ad2cp') td_awac = read('AWAC_test01.wpr', userdata=False) td_awac_ud = read('AWAC_test01.wpr') td_wr1 =", "msg('AWAC_test01.wpr+userdata')), (td_wr1, dat_wr1, msg('winriver01.PD0')), (td_wr2, dat_wr2, msg('winriver02.PD0')), ]: yield tb.data_equiv, dat1, dat2, msg", "save = tb.save_tdata dat_rdi = load('RDI_test01.h5') dat_rdi_i = load('RDI_test01_rotate_beam2inst.h5') dat_awac = load('AWAC_test01.h5') dat_awac_ud", "load('BenchFile01.h5') dat_sigi = load('Sig1000_IMU.h5') 
dat_sigi_ud = load('Sig1000_IMU_ud.h5') dat_wr1 = load('winriver01.h5') dat_wr2 = load('winriver02.h5')", "load('AWAC_test01.h5') dat_awac_ud = load('AWAC_test01_ud.h5') dat_sig = load('BenchFile01.h5') dat_sigi = load('Sig1000_IMU.h5') dat_sigi_ud = load('Sig1000_IMU_ud.h5')", "td_wr1 = read('winriver01.PD0') td_wr2 = read('winriver02.PD0') if make_data: save(td_rdi, 'RDI_test01.h5') save(td_sig, 'BenchFile01.h5') save(td_sigi,", "(\"The output of read('{}') does not match '{}'.\" .format(infile, testfile)) for dat1, dat2,", "'AWAC_test01_ud.h5') save(td_wr1, 'winriver01.h5') save(td_wr2, 'winriver02.h5') return if sys.version_info.major == 2: # This is", "# BUG that's loading different data??! td_sigi.pop('sys.temp_mag') dat_sigi_tmp = dat_sigi.copy() dat_sigi_tmp.pop('sys.temp_mag') td_sigi_ud.pop('sys.temp_mag') dat_sigi_ud_tmp", "msg('Sig1000_IMU_ud.ad2cp')), (td_awac, dat_awac, msg('AWAC_test01.wpr')), (td_awac_ud, dat_awac_ud, msg('AWAC_test01.wpr+userdata')), (td_wr1, dat_wr1, msg('winriver01.PD0')), (td_wr2, dat_wr2, msg('winriver02.PD0')),", "numer of the values in temp_mag # are not the same for py2?", "testfile = infile.split('.')[0] + '.h5' return (\"The output of read('{}') does not match", "userdata=False) td_awac_ud = read('AWAC_test01.wpr') td_wr1 = read('winriver01.PD0') td_wr2 = read('winriver02.PD0') if make_data: save(td_rdi,", "= load('Sig1000_IMU.h5') dat_sigi_ud = load('Sig1000_IMU_ud.h5') dat_wr1 = load('winriver01.h5') dat_wr2 = load('winriver02.h5') def test_read(make_data=False):", "read('AWAC_test01.wpr') td_wr1 = read('winriver01.PD0') td_wr2 = read('winriver02.PD0') if make_data: save(td_rdi, 'RDI_test01.h5') save(td_sig, 'BenchFile01.h5')", "in temp_mag # are not the same for py2? # !CLEANUP! 
# BUG", "does not match '{}'.\" .format(infile, testfile)) for dat1, dat2, msg in [ (td_rdi,", "= read('Sig1000_IMU.ad2cp') td_awac = read('AWAC_test01.wpr', userdata=False) td_awac_ud = read('AWAC_test01.wpr') td_wr1 = read('winriver01.PD0') td_wr2", "load('Sig1000_IMU_ud.h5') dat_wr1 = load('winriver01.h5') dat_wr2 = load('winriver02.h5') def test_read(make_data=False): td_rdi = read('RDI_test01.000') #", "msg(infile): testfile = infile.split('.')[0] + '.h5' return (\"The output of read('{}') does not", "= read('AWAC_test01.wpr') td_wr1 = read('winriver01.PD0') td_wr2 = read('winriver02.PD0') if make_data: save(td_rdi, 'RDI_test01.h5') save(td_sig,", "load('RDI_test01_rotate_beam2inst.h5') dat_awac = load('AWAC_test01.h5') dat_awac_ud = load('AWAC_test01_ud.h5') dat_sig = load('BenchFile01.h5') dat_sigi = load('Sig1000_IMU.h5')", "read('{}') does not match '{}'.\" .format(infile, testfile)) for dat1, dat2, msg in [", "= dat_sigi_ud def msg(infile): testfile = infile.split('.')[0] + '.h5' return (\"The output of", "dat_wr1, msg('winriver01.PD0')), (td_wr2, dat_wr2, msg('winriver02.PD0')), ]: yield tb.data_equiv, dat1, dat2, msg if __name__", "# This is a HACK for Py2 # for some reason a very", "(td_rdi, dat_rdi, msg('RDI_test01.000')), (td_sig, dat_sig, msg('BenchFile01.ad2cp')), (td_sigi, dat_sigi_tmp, msg('Sig1000_IMU.ad2cp')), (td_sigi_ud, dat_sigi_ud_tmp, msg('Sig1000_IMU_ud.ad2cp')), (td_awac,", "temp_mag # are not the same for py2? # !CLEANUP! 
# BUG that's", "infile.split('.')[0] + '.h5' return (\"The output of read('{}') does not match '{}'.\" .format(infile,", "as tb import sys load = tb.load_tdata save = tb.save_tdata dat_rdi = load('RDI_test01.h5')", "dat_sigi.copy() dat_sigi_tmp.pop('sys.temp_mag') td_sigi_ud.pop('sys.temp_mag') dat_sigi_ud_tmp = dat_sigi_ud.copy() dat_sigi_ud_tmp.pop('sys.temp_mag') else: dat_sigi_tmp = dat_sigi dat_sigi_ud_tmp =", "(td_sig, dat_sig, msg('BenchFile01.ad2cp')), (td_sigi, dat_sigi_tmp, msg('Sig1000_IMU.ad2cp')), (td_sigi_ud, dat_sigi_ud_tmp, msg('Sig1000_IMU_ud.ad2cp')), (td_awac, dat_awac, msg('AWAC_test01.wpr')), (td_awac_ud,", "of read('{}') does not match '{}'.\" .format(infile, testfile)) for dat1, dat2, msg in", "Py2 # for some reason a very small numer of the values in", "the built-in declination! td_sig = read('BenchFile01.ad2cp') td_sigi = read('Sig1000_IMU.ad2cp', userdata=False) td_sigi_ud = read('Sig1000_IMU.ad2cp')", "= tb.load_tdata save = tb.save_tdata dat_rdi = load('RDI_test01.h5') dat_rdi_i = load('RDI_test01_rotate_beam2inst.h5') dat_awac =", "dat_rdi, msg('RDI_test01.000')), (td_sig, dat_sig, msg('BenchFile01.ad2cp')), (td_sigi, dat_sigi_tmp, msg('Sig1000_IMU.ad2cp')), (td_sigi_ud, dat_sigi_ud_tmp, msg('Sig1000_IMU_ud.ad2cp')), (td_awac, dat_awac,", "data??! td_sigi.pop('sys.temp_mag') dat_sigi_tmp = dat_sigi.copy() dat_sigi_tmp.pop('sys.temp_mag') td_sigi_ud.pop('sys.temp_mag') dat_sigi_ud_tmp = dat_sigi_ud.copy() dat_sigi_ud_tmp.pop('sys.temp_mag') else: dat_sigi_tmp", "(td_wr1, dat_wr1, msg('winriver01.PD0')), (td_wr2, dat_wr2, msg('winriver02.PD0')), ]: yield tb.data_equiv, dat1, dat2, msg if", "uses the built-in declination! 
td_sig = read('BenchFile01.ad2cp') td_sigi = read('Sig1000_IMU.ad2cp', userdata=False) td_sigi_ud =", "msg('AWAC_test01.wpr')), (td_awac_ud, dat_awac_ud, msg('AWAC_test01.wpr+userdata')), (td_wr1, dat_wr1, msg('winriver01.PD0')), (td_wr2, dat_wr2, msg('winriver02.PD0')), ]: yield tb.data_equiv,", "in [ (td_rdi, dat_rdi, msg('RDI_test01.000')), (td_sig, dat_sig, msg('BenchFile01.ad2cp')), (td_sigi, dat_sigi_tmp, msg('Sig1000_IMU.ad2cp')), (td_sigi_ud, dat_sigi_ud_tmp,", "dat_sigi_ud_tmp.pop('sys.temp_mag') else: dat_sigi_tmp = dat_sigi dat_sigi_ud_tmp = dat_sigi_ud def msg(infile): testfile = infile.split('.')[0]", "reason a very small numer of the values in temp_mag # are not", "td_awac = read('AWAC_test01.wpr', userdata=False) td_awac_ud = read('AWAC_test01.wpr') td_wr1 = read('winriver01.PD0') td_wr2 = read('winriver02.PD0')", "dat_awac, msg('AWAC_test01.wpr')), (td_awac_ud, dat_awac_ud, msg('AWAC_test01.wpr+userdata')), (td_wr1, dat_wr1, msg('winriver01.PD0')), (td_wr2, dat_wr2, msg('winriver02.PD0')), ]: yield", "save(td_sigi, 'Sig1000_IMU.h5') save(td_sigi_ud, 'Sig1000_IMU_ud.h5') save(td_awac, 'AWAC_test01.h5') save(td_awac_ud, 'AWAC_test01_ud.h5') save(td_wr1, 'winriver01.h5') save(td_wr2, 'winriver02.h5') return", "very small numer of the values in temp_mag # are not the same", "test_read(make_data=False): td_rdi = read('RDI_test01.000') # This uses the built-in declination! td_sig = read('BenchFile01.ad2cp')", "return if sys.version_info.major == 2: # This is a HACK for Py2 #", "= load('AWAC_test01.h5') dat_awac_ud = load('AWAC_test01_ud.h5') dat_sig = load('BenchFile01.h5') dat_sigi = load('Sig1000_IMU.h5') dat_sigi_ud =", "= read('BenchFile01.ad2cp') td_sigi = read('Sig1000_IMU.ad2cp', userdata=False) td_sigi_ud = read('Sig1000_IMU.ad2cp') td_awac = read('AWAC_test01.wpr', userdata=False)", "# are not the same for py2? # !CLEANUP! 
# BUG that's loading", "td_wr2 = read('winriver02.PD0') if make_data: save(td_rdi, 'RDI_test01.h5') save(td_sig, 'BenchFile01.h5') save(td_sigi, 'Sig1000_IMU.h5') save(td_sigi_ud, 'Sig1000_IMU_ud.h5')", "dat_sigi_ud def msg(infile): testfile = infile.split('.')[0] + '.h5' return (\"The output of read('{}')", "testfile)) for dat1, dat2, msg in [ (td_rdi, dat_rdi, msg('RDI_test01.000')), (td_sig, dat_sig, msg('BenchFile01.ad2cp')),", "load = tb.load_tdata save = tb.save_tdata dat_rdi = load('RDI_test01.h5') dat_rdi_i = load('RDI_test01_rotate_beam2inst.h5') dat_awac", "= dat_sigi.copy() dat_sigi_tmp.pop('sys.temp_mag') td_sigi_ud.pop('sys.temp_mag') dat_sigi_ud_tmp = dat_sigi_ud.copy() dat_sigi_ud_tmp.pop('sys.temp_mag') else: dat_sigi_tmp = dat_sigi dat_sigi_ud_tmp", "return (\"The output of read('{}') does not match '{}'.\" .format(infile, testfile)) for dat1,", "match '{}'.\" .format(infile, testfile)) for dat1, dat2, msg in [ (td_rdi, dat_rdi, msg('RDI_test01.000')),", "else: dat_sigi_tmp = dat_sigi dat_sigi_ud_tmp = dat_sigi_ud def msg(infile): testfile = infile.split('.')[0] +", "read('Sig1000_IMU.ad2cp', userdata=False) td_sigi_ud = read('Sig1000_IMU.ad2cp') td_awac = read('AWAC_test01.wpr', userdata=False) td_awac_ud = read('AWAC_test01.wpr') td_wr1", "'winriver01.h5') save(td_wr2, 'winriver02.h5') return if sys.version_info.major == 2: # This is a HACK", "is a HACK for Py2 # for some reason a very small numer", "(td_wr2, dat_wr2, msg('winriver02.PD0')), ]: yield tb.data_equiv, dat1, dat2, msg if __name__ == '__main__':", "if make_data: save(td_rdi, 'RDI_test01.h5') save(td_sig, 'BenchFile01.h5') save(td_sigi, 'Sig1000_IMU.h5') save(td_sigi_ud, 'Sig1000_IMU_ud.h5') save(td_awac, 'AWAC_test01.h5') save(td_awac_ud,", "dat_awac_ud = load('AWAC_test01_ud.h5') dat_sig = load('BenchFile01.h5') dat_sigi = load('Sig1000_IMU.h5') dat_sigi_ud = load('Sig1000_IMU_ud.h5') dat_wr1", "a very small numer of the values in temp_mag # are not the", "load('winriver02.h5') def 
test_read(make_data=False): td_rdi = read('RDI_test01.000') # This uses the built-in declination! td_sig", "as read import dolfyn.test.base as tb import sys load = tb.load_tdata save =", "= dat_sigi_ud.copy() dat_sigi_ud_tmp.pop('sys.temp_mag') else: dat_sigi_tmp = dat_sigi dat_sigi_ud_tmp = dat_sigi_ud def msg(infile): testfile", "values in temp_mag # are not the same for py2? # !CLEANUP! #", "dat_sigi = load('Sig1000_IMU.h5') dat_sigi_ud = load('Sig1000_IMU_ud.h5') dat_wr1 = load('winriver01.h5') dat_wr2 = load('winriver02.h5') def", "dat_wr1 = load('winriver01.h5') dat_wr2 = load('winriver02.h5') def test_read(make_data=False): td_rdi = read('RDI_test01.000') # This", "are not the same for py2? # !CLEANUP! # BUG that's loading different", "= read('RDI_test01.000') # This uses the built-in declination! td_sig = read('BenchFile01.ad2cp') td_sigi =", "= read('winriver01.PD0') td_wr2 = read('winriver02.PD0') if make_data: save(td_rdi, 'RDI_test01.h5') save(td_sig, 'BenchFile01.h5') save(td_sigi, 'Sig1000_IMU.h5')", "msg('Sig1000_IMU.ad2cp')), (td_sigi_ud, dat_sigi_ud_tmp, msg('Sig1000_IMU_ud.ad2cp')), (td_awac, dat_awac, msg('AWAC_test01.wpr')), (td_awac_ud, dat_awac_ud, msg('AWAC_test01.wpr+userdata')), (td_wr1, dat_wr1, msg('winriver01.PD0')),", "read_example as read import dolfyn.test.base as tb import sys load = tb.load_tdata save", "dat_rdi = load('RDI_test01.h5') dat_rdi_i = load('RDI_test01_rotate_beam2inst.h5') dat_awac = load('AWAC_test01.h5') dat_awac_ud = load('AWAC_test01_ud.h5') dat_sig", "different data??! td_sigi.pop('sys.temp_mag') dat_sigi_tmp = dat_sigi.copy() dat_sigi_tmp.pop('sys.temp_mag') td_sigi_ud.pop('sys.temp_mag') dat_sigi_ud_tmp = dat_sigi_ud.copy() dat_sigi_ud_tmp.pop('sys.temp_mag') else:" ]
[ "such device %s:%s', name, device_id) abort(404) log.info('Successfully removed soft limit for device %s:%s", "computation_node = abort_when_node_not_found(name) if int(soft_limit) < 0: log.error(str.format('Number is not positive: {}', soft_limit))", "\\ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM, \\ DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, \\ NODE_AND_DEVICE_PARAMS, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE class SoftLimit(Resource): @swagger.operation(", "%s:%s was already set in a database to %s', name, device_id, soft_limit) log.info('Stored", "such device: %s', device_id) abort(404) limit_info = { 'name': name, 'device_id': device_id, 'soft_limit':", "flask_restful_swagger import swagger from hpcpm.api import log from hpcpm.api.helpers.database import database from hpcpm.api.helpers.utils", "hpcpm.api.helpers.constants import COMPUTATION_NODE_PARAM_NAME, COMPUTATION_NODE_NOT_FOUND_RESPONSE, \\ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM, \\ DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, \\ NODE_AND_DEVICE_PARAMS,", "201 @swagger.operation( notes='This endpoint is used for getting soft limit information from database',", ") def delete(self, name, device_id): result = database.delete_soft_limit_info(name, device_id) if not result: log.info('No", "log.info('Successfully get device %s:%s soft limit info: %s', name, device_id, result) return result,", "device.', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=[ COMPUTATION_NODE_PARAM_NAME, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM ], responseMessages=[ DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE, COMPUTATION_NODE_NOT_FOUND_RESPONSE ] )", "limit information from database and device', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ 
COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, ] )", "name, device_id): result = database.delete_soft_limit_info(name, device_id) if not result: log.info('No such device %s:%s',", "device_id): result = database.delete_soft_limit_info(name, device_id) if not result: log.info('No such device %s:%s', name,", "DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM ], responseMessages=[ DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE, COMPUTATION_NODE_NOT_FOUND_RESPONSE ] ) def put(self, name, device_id):", "delete(self, name, device_id): result = database.delete_soft_limit_info(name, device_id) if not result: log.info('No such device", "device_id) abort(404) log.info('Successfully removed soft limit for device %s:%s soft limit info: %s',", "from flask_restful_swagger import swagger from hpcpm.api import log from hpcpm.api.helpers.database import database from", "} upsert_result = database.replace_soft_limit_for_device(name, device_id, limit_info) if upsert_result.modified_count: log.info('Power limit for device %s:%s", "notes='This endpoint is used for removing soft limit information from database and device',", "= database.delete_soft_limit_info(name, device_id) if not result: log.info('No such device %s:%s', name, device_id) abort(404)", "upsert_result.modified_count: log.info('Power limit for device %s:%s was already set in a database to", "log.info('Stored power limit info %s', limit_info) else: log.info('Stored power limit info %s on", "NODE_AND_DEVICE_PARAMS, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE class SoftLimit(Resource): @swagger.operation( notes='This endpoint is used for setting soft limit", "= request.args.get('soft_limit') abort_when_not_int(soft_limit) computation_node = abort_when_node_not_found(name) if int(soft_limit) < 0: log.error(str.format('Number is not", "log.info('No such device %s:%s', name, device_id) abort(404) log.info('Successfully removed soft limit for device", "is not positive: {}', soft_limit)) abort(400) 
if not any(d['id'] == device_id for d", "@swagger.operation( notes='This endpoint is used for getting soft limit information from database', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit',", "name, device_id): result = database.get_soft_limit_for_device(name, device_id) if not result: log.info('No such device %s:%s',", "def put(self, name, device_id): soft_limit = request.args.get('soft_limit') abort_when_not_int(soft_limit) computation_node = abort_when_node_not_found(name) if int(soft_limit)", "info %s on id %s', limit_info, upsert_result.upserted_id) return 'Soft limit successfully set', 201", "hpcpm.api.helpers.database import database from hpcpm.api.helpers.utils import abort_when_not_int, abort_when_node_not_found from hpcpm.api.helpers.constants import COMPUTATION_NODE_PARAM_NAME, COMPUTATION_NODE_NOT_FOUND_RESPONSE,", "{ 'name': name, 'device_id': device_id, 'soft_limit': soft_limit } upsert_result = database.replace_soft_limit_for_device(name, device_id, limit_info)", "] ) def delete(self, name, device_id): result = database.delete_soft_limit_info(name, device_id) if not result:", "id %s', limit_info, upsert_result.upserted_id) return 'Soft limit successfully set', 201 @swagger.operation( notes='This endpoint", "name, device_id) abort(404) log.info('Successfully removed soft limit for device %s:%s soft limit info:", "and device', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, ] ) def delete(self, name, device_id):", "swagger from hpcpm.api import log from hpcpm.api.helpers.database import database from hpcpm.api.helpers.utils import abort_when_not_int,", "put(self, name, device_id): soft_limit = request.args.get('soft_limit') abort_when_not_int(soft_limit) computation_node = abort_when_node_not_found(name) if int(soft_limit) <", "COMPUTATION_NODE_FETCHED_RESPONSE, 
DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM, \\ DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, \\ NODE_AND_DEVICE_PARAMS, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE class SoftLimit(Resource): @swagger.operation( notes='This", "SoftLimit(Resource): @swagger.operation( notes='This endpoint is used for setting soft limit for given device.',", "a database to %s', name, device_id, soft_limit) log.info('Stored power limit info %s', limit_info)", "from hpcpm.api.helpers.database import database from hpcpm.api.helpers.utils import abort_when_not_int, abort_when_node_not_found from hpcpm.api.helpers.constants import COMPUTATION_NODE_PARAM_NAME,", "soft_limit) log.info('Stored power limit info %s', limit_info) else: log.info('Stored power limit info %s", "successfully set', 201 @swagger.operation( notes='This endpoint is used for getting soft limit information", "to %s', name, device_id, soft_limit) log.info('Stored power limit info %s', limit_info) else: log.info('Stored", "soft limit information from database and device', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, ]", "removed soft limit for device %s:%s soft limit info: %s', name, device_id, result)", "power limit info %s', limit_info) else: log.info('Stored power limit info %s on id", "removing soft limit information from database and device', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE,", "import Resource, request, abort from flask_restful_swagger import swagger from hpcpm.api import log from", "abort_when_node_not_found(name) if int(soft_limit) < 0: log.error(str.format('Number is not positive: {}', soft_limit)) abort(400) if", "positive: {}', soft_limit)) abort(400) if not any(d['id'] == device_id for d in 
computation_node['backend_info']['devices']):", "log from hpcpm.api.helpers.database import database from hpcpm.api.helpers.utils import abort_when_not_int, abort_when_node_not_found from hpcpm.api.helpers.constants import", "notes='This endpoint is used for getting soft limit information from database', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS,", "COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, ] ) def delete(self, name, device_id): result = database.delete_soft_limit_info(name, device_id) if", "device_id, result) return result, 200 @swagger.operation( notes='This endpoint is used for removing soft", "soft_limit } upsert_result = database.replace_soft_limit_for_device(name, device_id, limit_info) if upsert_result.modified_count: log.info('Power limit for device", "%s:%s', name, device_id) abort(404) log.info('Successfully removed soft limit for device %s:%s soft limit", "limit info: %s', name, device_id, result) return result, 200 @swagger.operation( notes='This endpoint is", "limit_info, upsert_result.upserted_id) return 'Soft limit successfully set', 201 @swagger.operation( notes='This endpoint is used", "DEVICE_SOFT_LIMIT_PARAM ], responseMessages=[ DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE, COMPUTATION_NODE_NOT_FOUND_RESPONSE ] ) def put(self, name, device_id): soft_limit", "name, device_id) abort(404) log.info('Successfully get device %s:%s soft limit info: %s', name, device_id,", "import abort_when_not_int, abort_when_node_not_found from hpcpm.api.helpers.constants import COMPUTATION_NODE_PARAM_NAME, COMPUTATION_NODE_NOT_FOUND_RESPONSE, \\ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM, \\", "from hpcpm.api.helpers.constants import COMPUTATION_NODE_PARAM_NAME, COMPUTATION_NODE_NOT_FOUND_RESPONSE, \\ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM, \\ 
DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, \\", "parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE ] ) def get(self, name, device_id): result = database.get_soft_limit_for_device(name,", "'Soft limit successfully set', 201 @swagger.operation( notes='This endpoint is used for getting soft", "name, device_id, result) return result, 200 @swagger.operation( notes='This endpoint is used for removing", "'soft_limit': soft_limit } upsert_result = database.replace_soft_limit_for_device(name, device_id, limit_info) if upsert_result.modified_count: log.info('Power limit for", "def get(self, name, device_id): result = database.get_soft_limit_for_device(name, device_id) if not result: log.info('No such", "request.args.get('soft_limit') abort_when_not_int(soft_limit) computation_node = abort_when_node_not_found(name) if int(soft_limit) < 0: log.error(str.format('Number is not positive:", "upsert_result.upserted_id) return 'Soft limit successfully set', 201 @swagger.operation( notes='This endpoint is used for", "for getting soft limit information from database', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE ]", "not positive: {}', soft_limit)) abort(400) if not any(d['id'] == device_id for d in", ") def put(self, name, device_id): soft_limit = request.args.get('soft_limit') abort_when_not_int(soft_limit) computation_node = abort_when_node_not_found(name) if", "endpoint is used for removing soft limit information from database and device', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit',", "flask_restful import Resource, request, abort from flask_restful_swagger import swagger from hpcpm.api import log", "database.replace_soft_limit_for_device(name, device_id, limit_info) if upsert_result.modified_count: log.info('Power 
limit for device %s:%s was already set", "used for getting soft limit information from database', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE", "information from database and device', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, ] ) def", "import log from hpcpm.api.helpers.database import database from hpcpm.api.helpers.utils import abort_when_not_int, abort_when_node_not_found from hpcpm.api.helpers.constants", "soft limit information from database', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE ] ) def", "for device %s:%s soft limit info: %s', name, device_id, result) return result, 200", "abort(404) log.info('Successfully get device %s:%s soft limit info: %s', name, device_id, result) return", "responseMessages=[ DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE, COMPUTATION_NODE_NOT_FOUND_RESPONSE ] ) def put(self, name, device_id): soft_limit = request.args.get('soft_limit')", "limit information from database', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE ] ) def get(self,", "device_id): result = database.get_soft_limit_for_device(name, device_id) if not result: log.info('No such device %s:%s', name,", "= database.replace_soft_limit_for_device(name, device_id, limit_info) if upsert_result.modified_count: log.info('Power limit for device %s:%s was already", "hpcpm.api import log from hpcpm.api.helpers.database import database from 
hpcpm.api.helpers.utils import abort_when_not_int, abort_when_node_not_found from", "abort from flask_restful_swagger import swagger from hpcpm.api import log from hpcpm.api.helpers.database import database", "device_id, soft_limit) log.info('Stored power limit info %s', limit_info) else: log.info('Stored power limit info", "soft limit for device %s:%s soft limit info: %s', name, device_id, result) return", "d in computation_node['backend_info']['devices']): log.error('There is no such device: %s', device_id) abort(404) limit_info =", "def delete(self, name, device_id): result = database.delete_soft_limit_info(name, device_id) if not result: log.info('No such", "device %s:%s', name, device_id) abort(404) log.info('Successfully removed soft limit for device %s:%s soft", "hpcpm.api.helpers.utils import abort_when_not_int, abort_when_node_not_found from hpcpm.api.helpers.constants import COMPUTATION_NODE_PARAM_NAME, COMPUTATION_NODE_NOT_FOUND_RESPONSE, \\ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM,", "limit info %s', limit_info) else: log.info('Stored power limit info %s on id %s',", "used for removing soft limit information from database and device', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[", "nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=[ COMPUTATION_NODE_PARAM_NAME, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM ], responseMessages=[ DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE, COMPUTATION_NODE_NOT_FOUND_RESPONSE ] ) def", "DEVICE_NOT_FOUND_RESPONSE ] ) def get(self, name, device_id): result = database.get_soft_limit_for_device(name, device_id) if not", "'device_id': device_id, 'soft_limit': soft_limit } upsert_result = database.replace_soft_limit_for_device(name, device_id, limit_info) if upsert_result.modified_count: log.info('Power", "set', 201 
@swagger.operation( notes='This endpoint is used for getting soft limit information from", "request, abort from flask_restful_swagger import swagger from hpcpm.api import log from hpcpm.api.helpers.database import", "COMPUTATION_NODE_PARAM_NAME, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM ], responseMessages=[ DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE, COMPUTATION_NODE_NOT_FOUND_RESPONSE ] ) def put(self, name,", "abort(400) if not any(d['id'] == device_id for d in computation_node['backend_info']['devices']): log.error('There is no", "setting soft limit for given device.', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=[ COMPUTATION_NODE_PARAM_NAME, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM ], responseMessages=[", "%s:%s soft limit info: %s', name, device_id, result) return result, 200 @swagger.operation( notes='This", "int(soft_limit) < 0: log.error(str.format('Number is not positive: {}', soft_limit)) abort(400) if not any(d['id']", "device_id) if not result: log.info('No such device %s:%s', name, device_id) abort(404) log.info('Successfully removed", "soft_limit)) abort(400) if not any(d['id'] == device_id for d in computation_node['backend_info']['devices']): log.error('There is", "is no such device: %s', device_id) abort(404) limit_info = { 'name': name, 'device_id':", "from flask_restful import Resource, request, abort from flask_restful_swagger import swagger from hpcpm.api import", "info: %s', name, device_id, result) return result, 200 @swagger.operation( notes='This endpoint is used", "200 @swagger.operation( notes='This endpoint is used for removing soft limit information from database", "limit for given device.', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=[ COMPUTATION_NODE_PARAM_NAME, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM ], responseMessages=[ DEVICE_SOFT_LIMIT_SET_RESPONSE, 
DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE,", "if not any(d['id'] == device_id for d in computation_node['backend_info']['devices']): log.error('There is no such", "DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE class SoftLimit(Resource): @swagger.operation( notes='This endpoint is used for setting soft limit for", "power limit info %s on id %s', limit_info, upsert_result.upserted_id) return 'Soft limit successfully", "DEVICE_SOFT_LIMIT_PARAM, \\ DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, \\ NODE_AND_DEVICE_PARAMS, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE class SoftLimit(Resource): @swagger.operation( notes='This endpoint is", "result: log.info('No such device %s:%s', name, device_id) abort(404) log.info('Successfully get device %s:%s soft", ") def get(self, name, device_id): result = database.get_soft_limit_for_device(name, device_id) if not result: log.info('No", "device_id) abort(404) log.info('Successfully get device %s:%s soft limit info: %s', name, device_id, result)", "device_id) if not result: log.info('No such device %s:%s', name, device_id) abort(404) log.info('Successfully get", "'name': name, 'device_id': device_id, 'soft_limit': soft_limit } upsert_result = database.replace_soft_limit_for_device(name, device_id, limit_info) if", "result, 200 @swagger.operation( notes='This endpoint is used for removing soft limit information from", "return 'Soft limit successfully set', 201 @swagger.operation( notes='This endpoint is used for getting", "get device %s:%s soft limit info: %s', name, device_id, result) return result, 200", "upsert_result = database.replace_soft_limit_for_device(name, device_id, limit_info) if upsert_result.modified_count: log.info('Power limit for device %s:%s was", "device_id, 'soft_limit': soft_limit } upsert_result = database.replace_soft_limit_for_device(name, device_id, limit_info) if upsert_result.modified_count: log.info('Power limit", "given device.', 
nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=[ COMPUTATION_NODE_PARAM_NAME, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM ], responseMessages=[ DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE, COMPUTATION_NODE_NOT_FOUND_RESPONSE ]", "] ) def put(self, name, device_id): soft_limit = request.args.get('soft_limit') abort_when_not_int(soft_limit) computation_node = abort_when_node_not_found(name)", "= { 'name': name, 'device_id': device_id, 'soft_limit': soft_limit } upsert_result = database.replace_soft_limit_for_device(name, device_id,", "class SoftLimit(Resource): @swagger.operation( notes='This endpoint is used for setting soft limit for given", "COMPUTATION_NODE_NOT_FOUND_RESPONSE ] ) def put(self, name, device_id): soft_limit = request.args.get('soft_limit') abort_when_not_int(soft_limit) computation_node =", "< 0: log.error(str.format('Number is not positive: {}', soft_limit)) abort(400) if not any(d['id'] ==", "import swagger from hpcpm.api import log from hpcpm.api.helpers.database import database from hpcpm.api.helpers.utils import", "no such device: %s', device_id) abort(404) limit_info = { 'name': name, 'device_id': device_id,", "else: log.info('Stored power limit info %s on id %s', limit_info, upsert_result.upserted_id) return 'Soft", "COMPUTATION_NODE_PARAM_NAME, COMPUTATION_NODE_NOT_FOUND_RESPONSE, \\ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM, \\ DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, \\ NODE_AND_DEVICE_PARAMS, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE class", "DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, \\ NODE_AND_DEVICE_PARAMS, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE class SoftLimit(Resource): @swagger.operation( notes='This endpoint is used for", "from hpcpm.api.helpers.utils import abort_when_not_int, abort_when_node_not_found from hpcpm.api.helpers.constants import COMPUTATION_NODE_PARAM_NAME, 
COMPUTATION_NODE_NOT_FOUND_RESPONSE, \\ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_IDENTIFIER_PARAM,", "name, 'device_id': device_id, 'soft_limit': soft_limit } upsert_result = database.replace_soft_limit_for_device(name, device_id, limit_info) if upsert_result.modified_count:", "@swagger.operation( notes='This endpoint is used for setting soft limit for given device.', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit',", "database from hpcpm.api.helpers.utils import abort_when_not_int, abort_when_node_not_found from hpcpm.api.helpers.constants import COMPUTATION_NODE_PARAM_NAME, COMPUTATION_NODE_NOT_FOUND_RESPONSE, \\ COMPUTATION_NODE_FETCHED_RESPONSE,", "abort_when_node_not_found from hpcpm.api.helpers.constants import COMPUTATION_NODE_PARAM_NAME, COMPUTATION_NODE_NOT_FOUND_RESPONSE, \\ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM, \\ DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_NOT_FOUND_RESPONSE,", "for device %s:%s was already set in a database to %s', name, device_id,", "DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM, \\ DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, \\ NODE_AND_DEVICE_PARAMS, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE class SoftLimit(Resource): @swagger.operation( notes='This endpoint", "notes='This endpoint is used for setting soft limit for given device.', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=[", "limit_info = { 'name': name, 'device_id': device_id, 'soft_limit': soft_limit } upsert_result = database.replace_soft_limit_for_device(name,", "%s', name, device_id, soft_limit) log.info('Stored power limit info %s', limit_info) else: log.info('Stored power", "abort_when_not_int, abort_when_node_not_found from hpcpm.api.helpers.constants import COMPUTATION_NODE_PARAM_NAME, COMPUTATION_NODE_NOT_FOUND_RESPONSE, \\ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM, \\ 
DEVICE_SOFT_LIMIT_SET_RESPONSE,", "from database', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE ] ) def get(self, name, device_id):", "limit_info) else: log.info('Stored power limit info %s on id %s', limit_info, upsert_result.upserted_id) return", "responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE ] ) def get(self, name, device_id): result = database.get_soft_limit_for_device(name, device_id)", "not result: log.info('No such device %s:%s', name, device_id) abort(404) log.info('Successfully get device %s:%s", "device %s:%s soft limit info: %s', name, device_id, result) return result, 200 @swagger.operation(", "information from database', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE ] ) def get(self, name,", "nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, ] ) def delete(self, name, device_id): result =", "database.delete_soft_limit_info(name, device_id) if not result: log.info('No such device %s:%s', name, device_id) abort(404) log.info('Successfully", "], responseMessages=[ DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE, COMPUTATION_NODE_NOT_FOUND_RESPONSE ] ) def put(self, name, device_id): soft_limit =", "import database from hpcpm.api.helpers.utils import abort_when_not_int, abort_when_node_not_found from hpcpm.api.helpers.constants import COMPUTATION_NODE_PARAM_NAME, COMPUTATION_NODE_NOT_FOUND_RESPONSE, \\", "if int(soft_limit) < 0: log.error(str.format('Number is not positive: {}', soft_limit)) abort(400) if not", "\\ DEVICE_SOFT_LIMIT_SET_RESPONSE, 
DEVICE_NOT_FOUND_RESPONSE, \\ NODE_AND_DEVICE_PARAMS, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE class SoftLimit(Resource): @swagger.operation( notes='This endpoint is used", "log.info('Successfully removed soft limit for device %s:%s soft limit info: %s', name, device_id,", "for setting soft limit for given device.', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=[ COMPUTATION_NODE_PARAM_NAME, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM ],", "log.info('No such device %s:%s', name, device_id) abort(404) log.info('Successfully get device %s:%s soft limit", "0: log.error(str.format('Number is not positive: {}', soft_limit)) abort(400) if not any(d['id'] == device_id", "log.error('There is no such device: %s', device_id) abort(404) limit_info = { 'name': name,", "\\ NODE_AND_DEVICE_PARAMS, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE class SoftLimit(Resource): @swagger.operation( notes='This endpoint is used for setting soft", "= database.get_soft_limit_for_device(name, device_id) if not result: log.info('No such device %s:%s', name, device_id) abort(404)", "device', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, ] ) def delete(self, name, device_id): result", "device_id for d in computation_node['backend_info']['devices']): log.error('There is no such device: %s', device_id) abort(404)", "limit_info) if upsert_result.modified_count: log.info('Power limit for device %s:%s was already set in a", "parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, ] ) def delete(self, name, device_id): result = database.delete_soft_limit_info(name,", "endpoint is used for setting soft limit for given device.', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=[ COMPUTATION_NODE_PARAM_NAME,", 
"log.info('Stored power limit info %s on id %s', limit_info, upsert_result.upserted_id) return 'Soft limit", "log.error(str.format('Number is not positive: {}', soft_limit)) abort(400) if not any(d['id'] == device_id for", "Resource, request, abort from flask_restful_swagger import swagger from hpcpm.api import log from hpcpm.api.helpers.database", "was already set in a database to %s', name, device_id, soft_limit) log.info('Stored power", "name, device_id, soft_limit) log.info('Stored power limit info %s', limit_info) else: log.info('Stored power limit", "from database and device', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, ] ) def delete(self,", "COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE ] ) def get(self, name, device_id): result = database.get_soft_limit_for_device(name, device_id) if", "nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE ] ) def get(self, name, device_id): result =", "for d in computation_node['backend_info']['devices']): log.error('There is no such device: %s', device_id) abort(404) limit_info", "info %s', limit_info) else: log.info('Stored power limit info %s on id %s', limit_info,", "%s', limit_info) else: log.info('Stored power limit info %s on id %s', limit_info, upsert_result.upserted_id)", "== device_id for d in computation_node['backend_info']['devices']): log.error('There is no such device: %s', device_id)", "%s', name, device_id, result) return result, 200 @swagger.operation( notes='This endpoint is used for", "if not result: log.info('No such device %s:%s', name, device_id) abort(404) log.info('Successfully get device", "from hpcpm.api import log from hpcpm.api.helpers.database import database from hpcpm.api.helpers.utils 
import abort_when_not_int, abort_when_node_not_found", "device_id, limit_info) if upsert_result.modified_count: log.info('Power limit for device %s:%s was already set in", "log.info('Power limit for device %s:%s was already set in a database to %s',", "result) return result, 200 @swagger.operation( notes='This endpoint is used for removing soft limit", "if not result: log.info('No such device %s:%s', name, device_id) abort(404) log.info('Successfully removed soft", "endpoint is used for getting soft limit information from database', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[", "device_id): soft_limit = request.args.get('soft_limit') abort_when_not_int(soft_limit) computation_node = abort_when_node_not_found(name) if int(soft_limit) < 0: log.error(str.format('Number", "device %s:%s was already set in a database to %s', name, device_id, soft_limit)", "device_id) abort(404) limit_info = { 'name': name, 'device_id': device_id, 'soft_limit': soft_limit } upsert_result", "responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, ] ) def delete(self, name, device_id): result = database.delete_soft_limit_info(name, device_id)", "is used for getting soft limit information from database', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE,", "is used for setting soft limit for given device.', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=[ COMPUTATION_NODE_PARAM_NAME, DEVICE_IDENTIFIER_PARAM,", "in a database to %s', name, device_id, soft_limit) log.info('Stored power limit info %s',", "%s', limit_info, upsert_result.upserted_id) return 'Soft limit successfully set', 201 @swagger.operation( notes='This endpoint is", "database to %s', name, device_id, soft_limit) log.info('Stored power limit info %s', 
limit_info) else:", "device %s:%s', name, device_id) abort(404) log.info('Successfully get device %s:%s soft limit info: %s',", "DEVICE_NOT_FOUND_RESPONSE, \\ NODE_AND_DEVICE_PARAMS, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE class SoftLimit(Resource): @swagger.operation( notes='This endpoint is used for setting", "such device %s:%s', name, device_id) abort(404) log.info('Successfully get device %s:%s soft limit info:", "abort(404) log.info('Successfully removed soft limit for device %s:%s soft limit info: %s', name,", "database', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE ] ) def get(self, name, device_id): result", "DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE, COMPUTATION_NODE_NOT_FOUND_RESPONSE ] ) def put(self, name, device_id): soft_limit = request.args.get('soft_limit') abort_when_not_int(soft_limit) computation_node", "database.get_soft_limit_for_device(name, device_id) if not result: log.info('No such device %s:%s', name, device_id) abort(404) log.info('Successfully", "soft limit info: %s', name, device_id, result) return result, 200 @swagger.operation( notes='This endpoint", "abort(404) limit_info = { 'name': name, 'device_id': device_id, 'soft_limit': soft_limit } upsert_result =", "limit successfully set', 201 @swagger.operation( notes='This endpoint is used for getting soft limit", "computation_node['backend_info']['devices']): log.error('There is no such device: %s', device_id) abort(404) limit_info = { 'name':", "COMPUTATION_NODE_NOT_FOUND_RESPONSE, \\ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM, \\ DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, \\ NODE_AND_DEVICE_PARAMS, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE class SoftLimit(Resource):", "] ) def get(self, name, device_id): result = database.get_soft_limit_for_device(name, device_id) if not result:", "is used 
for removing soft limit information from database and device', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS,", "limit for device %s:%s soft limit info: %s', name, device_id, result) return result,", "DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE, COMPUTATION_NODE_NOT_FOUND_RESPONSE ] ) def put(self, name, device_id): soft_limit = request.args.get('soft_limit') abort_when_not_int(soft_limit)", "set in a database to %s', name, device_id, soft_limit) log.info('Stored power limit info", "import COMPUTATION_NODE_PARAM_NAME, COMPUTATION_NODE_NOT_FOUND_RESPONSE, \\ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM, \\ DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, \\ NODE_AND_DEVICE_PARAMS, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE", "for removing soft limit information from database and device', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE,", "result = database.delete_soft_limit_info(name, device_id) if not result: log.info('No such device %s:%s', name, device_id)", "used for setting soft limit for given device.', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=[ COMPUTATION_NODE_PARAM_NAME, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM", "name, device_id): soft_limit = request.args.get('soft_limit') abort_when_not_int(soft_limit) computation_node = abort_when_node_not_found(name) if int(soft_limit) < 0:", "@swagger.operation( notes='This endpoint is used for removing soft limit information from database and", "database and device', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE, ] ) def delete(self, name,", "in 
computation_node['backend_info']['devices']): log.error('There is no such device: %s', device_id) abort(404) limit_info = {", "any(d['id'] == device_id for d in computation_node['backend_info']['devices']): log.error('There is no such device: %s',", "if upsert_result.modified_count: log.info('Power limit for device %s:%s was already set in a database", "device: %s', device_id) abort(404) limit_info = { 'name': name, 'device_id': device_id, 'soft_limit': soft_limit", "return result, 200 @swagger.operation( notes='This endpoint is used for removing soft limit information", "parameters=[ COMPUTATION_NODE_PARAM_NAME, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM ], responseMessages=[ DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE, COMPUTATION_NODE_NOT_FOUND_RESPONSE ] ) def put(self,", "not any(d['id'] == device_id for d in computation_node['backend_info']['devices']): log.error('There is no such device:", "abort_when_not_int(soft_limit) computation_node = abort_when_node_not_found(name) if int(soft_limit) < 0: log.error(str.format('Number is not positive: {}',", "get(self, name, device_id): result = database.get_soft_limit_for_device(name, device_id) if not result: log.info('No such device", "result = database.get_soft_limit_for_device(name, device_id) if not result: log.info('No such device %s:%s', name, device_id)", "limit info %s on id %s', limit_info, upsert_result.upserted_id) return 'Soft limit successfully set',", "limit for device %s:%s was already set in a database to %s', name,", "%s on id %s', limit_info, upsert_result.upserted_id) return 'Soft limit successfully set', 201 @swagger.operation(", "{}', soft_limit)) abort(400) if not any(d['id'] == device_id for d in computation_node['backend_info']['devices']): log.error('There", "DEVICE_NOT_FOUND_RESPONSE, ] ) def delete(self, name, device_id): result = database.delete_soft_limit_info(name, device_id) if not", "= abort_when_node_not_found(name) if int(soft_limit) < 0: 
log.error(str.format('Number is not positive: {}', soft_limit)) abort(400)", "result: log.info('No such device %s:%s', name, device_id) abort(404) log.info('Successfully removed soft limit for", "for given device.', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=[ COMPUTATION_NODE_PARAM_NAME, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM ], responseMessages=[ DEVICE_SOFT_LIMIT_SET_RESPONSE, DEVICE_SOFT_LIMIT_SET_RESPONSE_FAILURE, COMPUTATION_NODE_NOT_FOUND_RESPONSE", "not result: log.info('No such device %s:%s', name, device_id) abort(404) log.info('Successfully removed soft limit", "getting soft limit information from database', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=NODE_AND_DEVICE_PARAMS, responseMessages=[ COMPUTATION_NODE_FETCHED_RESPONSE, DEVICE_NOT_FOUND_RESPONSE ] )", "on id %s', limit_info, upsert_result.upserted_id) return 'Soft limit successfully set', 201 @swagger.operation( notes='This", "soft_limit = request.args.get('soft_limit') abort_when_not_int(soft_limit) computation_node = abort_when_node_not_found(name) if int(soft_limit) < 0: log.error(str.format('Number is", "%s:%s', name, device_id) abort(404) log.info('Successfully get device %s:%s soft limit info: %s', name,", "already set in a database to %s', name, device_id, soft_limit) log.info('Stored power limit", "%s', device_id) abort(404) limit_info = { 'name': name, 'device_id': device_id, 'soft_limit': soft_limit }", "soft limit for given device.', nickname='/nodes/computation_node/<string:name>/<string:device_id>/soft_limit', parameters=[ COMPUTATION_NODE_PARAM_NAME, DEVICE_IDENTIFIER_PARAM, DEVICE_SOFT_LIMIT_PARAM ], responseMessages=[ DEVICE_SOFT_LIMIT_SET_RESPONSE," ]
[]
[ "> 6: print(\"SHOULD NOT CREATE SHUFFLE\") shuffle=\"noshuffle\" list_images = os.listdir(image_folder) driving_video = imageio.mimread(template_video)", "make_animation from skimage import img_as_ubyte warnings.filterwarnings(\"ignore\") if len(sys.argv) < 6: print(\"Usage: deepfake_multiple.py <source", "(256, 256))[..., :3] for frame in driving_video] generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml', checkpoint_path='vox-cpk.pth.tar') for", "numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation from skimage.transform", "make_animation(source_image, driving_video, generator, kp_detector, relative=False, adapt_movement_scale=True) #imageio.mimsave(\"testing.mp4\", [img_as_ubyte(frame) for frame in predictions2]) #os.system(f\"python3", "animation from skimage.transform import resize from IPython.display import HTML import warnings import sys", "= os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], sys.argv[3]) final_vid_name = sys.argv[3] x = int(sys.argv[4]) y", "= imageio.mimread(template_video) driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video] generator,", "os.path.join(os.curdir, \"resources\", \"combos\", \"createcombo.py\") os.system(f\"python3 {combiner} {source_folder} {template_video_name} {final_vid_name} {x} {y} {shuffle}\") sys.exit()", "from skimage import img_as_ubyte warnings.filterwarnings(\"ignore\") if len(sys.argv) < 6: print(\"Usage: deepfake_multiple.py <source name>", "{final_vid_name} {x} {y} {shuffle}\") sys.exit() #Resize image and video to 256x256 #save resulting", "sys.argv[1]) image_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], \"images\") template_video = os.path.join(os.curdir, \"resources\", \"combos\",", "6: print(\"Usage: deepfake_multiple.py <source name> <template name> <final_vid_name> <rows> <columns> <no shuffle arg>\")", "import imageio import numpy as np import matplotlib.pyplot as plt import 
matplotlib.animation as", "video #predictions2 = make_animation(source_image, driving_video, generator, kp_detector, relative=False, adapt_movement_scale=True) #imageio.mimsave(\"testing.mp4\", [img_as_ubyte(frame) for frame", "imageio import numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation", "NOT CREATE SHUFFLE\") shuffle=\"noshuffle\" list_images = os.listdir(image_folder) driving_video = imageio.mimread(template_video) driving_video = [resize(frame,", "<template name> <final_vid_name> <rows> <columns> <no shuffle arg>\") sys.exit() source_folder = os.path.join(os.curdir, \"resources\",", "import resize from IPython.display import HTML import warnings import sys import os from", "\"resources\", \"combos\", sys.argv[1], \"images\") template_video = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], sys.argv[2]) template_video_name =", "shuffle = \"\" if len(sys.argv) > 6: print(\"SHOULD NOT CREATE SHUFFLE\") shuffle=\"noshuffle\" list_images", "if len(sys.argv) > 6: print(\"SHOULD NOT CREATE SHUFFLE\") shuffle=\"noshuffle\" list_images = os.listdir(image_folder) driving_video", "\"resources\", \"combos\", sys.argv[1], sys.argv[2]) template_video_name = sys.argv[2] gen_vid_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1],", "= os.listdir(image_folder) driving_video = imageio.mimread(template_video) driving_video = [resize(frame, (256, 256))[..., :3] for frame", "shuffle arg>\") sys.exit() source_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1]) image_folder = os.path.join(os.curdir, \"resources\",", "frame in driving_video] generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml', checkpoint_path='vox-cpk.pth.tar') for image in list_images: image_path", "import numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation from", "load_checkpoints from demo import make_animation from skimage import img_as_ubyte 
warnings.filterwarnings(\"ignore\") if len(sys.argv) <", "os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], \"gen\") final_vid = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], sys.argv[3]) final_vid_name", "if len(sys.argv) < 6: print(\"Usage: deepfake_multiple.py <source name> <template name> <final_vid_name> <rows> <columns>", "combiner = os.path.join(os.curdir, \"resources\", \"combos\", \"createcombo.py\") os.system(f\"python3 {combiner} {source_folder} {template_video_name} {final_vid_name} {x} {y}", "matplotlib.animation as animation from skimage.transform import resize from IPython.display import HTML import warnings", "\"resources\", \"combos\", \"createcombo.py\") os.system(f\"python3 {combiner} {source_folder} {template_video_name} {final_vid_name} {x} {y} {shuffle}\") sys.exit() #Resize", "= [resize(frame, (256, 256))[..., :3] for frame in driving_video] generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml',", "sys.exit() source_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1]) image_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1],", "driving_video, generator, kp_detector, relative=True) imageio.mimsave(gen_vid, [img_as_ubyte(frame) for frame in predictions]) combiner = os.path.join(os.curdir,", "name> <final_vid_name> <rows> <columns> <no shuffle arg>\") sys.exit() source_folder = os.path.join(os.curdir, \"resources\", \"combos\",", "sys.argv[3] x = int(sys.argv[4]) y = int(sys.argv[5]) shuffle = \"\" if len(sys.argv) >", "[resize(frame, (256, 256))[..., :3] for frame in driving_video] generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml', checkpoint_path='vox-cpk.pth.tar')", "{source_folder} {template_video_name} {final_vid_name} {x} {y} {shuffle}\") sys.exit() #Resize image and video to 256x256", "from demo import make_animation from skimage import img_as_ubyte warnings.filterwarnings(\"ignore\") if len(sys.argv) < 6:", "image_path 
= os.path.join(image_folder, image) source_image = imageio.imread(image_path) source_image = resize(source_image, (256, 256))[..., :3]", "list_images: image_path = os.path.join(image_folder, image) source_image = imageio.imread(image_path) source_image = resize(source_image, (256, 256))[...,", "import matplotlib.pyplot as plt import matplotlib.animation as animation from skimage.transform import resize from", "img_as_ubyte warnings.filterwarnings(\"ignore\") if len(sys.argv) < 6: print(\"Usage: deepfake_multiple.py <source name> <template name> <final_vid_name>", "gen_vid_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], \"gen\") final_vid = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1],", "#save resulting video #predictions2 = make_animation(source_image, driving_video, generator, kp_detector, relative=False, adapt_movement_scale=True) #imageio.mimsave(\"testing.mp4\", [img_as_ubyte(frame)", "source_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1]) image_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], \"images\")", "video to 256x256 #save resulting video #predictions2 = make_animation(source_image, driving_video, generator, kp_detector, relative=False,", "np import matplotlib.pyplot as plt import matplotlib.animation as animation from skimage.transform import resize", "resulting video #predictions2 = make_animation(source_image, driving_video, generator, kp_detector, relative=False, adapt_movement_scale=True) #imageio.mimsave(\"testing.mp4\", [img_as_ubyte(frame) for", "{template_video_name} {final_vid_name} {x} {y} {shuffle}\") sys.exit() #Resize image and video to 256x256 #save", "#imageio.mimsave(\"testing.mp4\", [img_as_ubyte(frame) for frame in predictions2]) #os.system(f\"python3 {createvid} {template_video} {gen_vid} {final_vid}\") #print(f\"VIDEO GENERATED:", "= sys.argv[2] gen_vid_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], \"gen\") 
final_vid = os.path.join(os.curdir, \"resources\",", "os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1]) image_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], \"images\") template_video =", "sys.exit() #Resize image and video to 256x256 #save resulting video #predictions2 = make_animation(source_image,", "adapt_movement_scale=True) #imageio.mimsave(\"testing.mp4\", [img_as_ubyte(frame) for frame in predictions2]) #os.system(f\"python3 {createvid} {template_video} {gen_vid} {final_vid}\") #print(f\"VIDEO", "sys.argv[1], \"gen\") final_vid = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], sys.argv[3]) final_vid_name = sys.argv[3] x", "< 6: print(\"Usage: deepfake_multiple.py <source name> <template name> <final_vid_name> <rows> <columns> <no shuffle", "imageio.mimsave(gen_vid, [img_as_ubyte(frame) for frame in predictions]) combiner = os.path.join(os.curdir, \"resources\", \"combos\", \"createcombo.py\") os.system(f\"python3", ":3] for frame in driving_video] generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml', checkpoint_path='vox-cpk.pth.tar') for image in", "image in list_images: image_path = os.path.join(image_folder, image) source_image = imageio.imread(image_path) source_image = resize(source_image,", "{shuffle}\") sys.exit() #Resize image and video to 256x256 #save resulting video #predictions2 =", "driving_video] generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml', checkpoint_path='vox-cpk.pth.tar') for image in list_images: image_path = os.path.join(image_folder,", "in list_images: image_path = os.path.join(image_folder, image) source_image = imageio.imread(image_path) source_image = resize(source_image, (256,", "shuffle=\"noshuffle\" list_images = os.listdir(image_folder) driving_video = imageio.mimread(template_video) driving_video = [resize(frame, (256, 256))[..., :3]", "checkpoint_path='vox-cpk.pth.tar') for image in list_images: image_path = 
os.path.join(image_folder, image) source_image = imageio.imread(image_path) source_image", "= os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], \"images\") template_video = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], sys.argv[2])", "\"resources\", \"combos\", sys.argv[1], sys.argv[3]) final_vid_name = sys.argv[3] x = int(sys.argv[4]) y = int(sys.argv[5])", "as np import matplotlib.pyplot as plt import matplotlib.animation as animation from skimage.transform import", "<final_vid_name> <rows> <columns> <no shuffle arg>\") sys.exit() source_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1])", "plt import matplotlib.animation as animation from skimage.transform import resize from IPython.display import HTML", "gen_vid_name) if not os.path.exists(gen_vid): predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=True) imageio.mimsave(gen_vid, [img_as_ubyte(frame)", "from skimage.transform import resize from IPython.display import HTML import warnings import sys import", "import matplotlib.animation as animation from skimage.transform import resize from IPython.display import HTML import", "generator, kp_detector, relative=False, adapt_movement_scale=True) #imageio.mimsave(\"testing.mp4\", [img_as_ubyte(frame) for frame in predictions2]) #os.system(f\"python3 {createvid} {template_video}", "demo import load_checkpoints from demo import make_animation from skimage import img_as_ubyte warnings.filterwarnings(\"ignore\") if", "from IPython.display import HTML import warnings import sys import os from demo import", "x = int(sys.argv[4]) y = int(sys.argv[5]) shuffle = \"\" if len(sys.argv) > 6:", "int(sys.argv[5]) shuffle = \"\" if len(sys.argv) > 6: print(\"SHOULD NOT CREATE SHUFFLE\") shuffle=\"noshuffle\"", "{combiner} {source_folder} {template_video_name} {final_vid_name} {x} {y} {shuffle}\") sys.exit() #Resize image and video to", "\"combos\", sys.argv[1], sys.argv[2]) 
template_video_name = sys.argv[2] gen_vid_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], \"gen\")", "frame in predictions]) combiner = os.path.join(os.curdir, \"resources\", \"combos\", \"createcombo.py\") os.system(f\"python3 {combiner} {source_folder} {template_video_name}", "resize from IPython.display import HTML import warnings import sys import os from demo", "demo import make_animation from skimage import img_as_ubyte warnings.filterwarnings(\"ignore\") if len(sys.argv) < 6: print(\"Usage:", "= os.path.join(os.curdir, \"resources\", \"combos\", \"createcombo.py\") os.system(f\"python3 {combiner} {source_folder} {template_video_name} {final_vid_name} {x} {y} {shuffle}\")", "kp_detector, relative=False, adapt_movement_scale=True) #imageio.mimsave(\"testing.mp4\", [img_as_ubyte(frame) for frame in predictions2]) #os.system(f\"python3 {createvid} {template_video} {gen_vid}", "os.path.join(image_folder, image) source_image = imageio.imread(image_path) source_image = resize(source_image, (256, 256))[..., :3] gen_vid_name =", "= \"\" if len(sys.argv) > 6: print(\"SHOULD NOT CREATE SHUFFLE\") shuffle=\"noshuffle\" list_images =", "gen_vid_name = image.split(\".\")[0] gen_vid_name = f\"{gen_vid_name}_gen.mp4\" gen_vid = os.path.join(gen_vid_folder, gen_vid_name) if not os.path.exists(gen_vid):", "import sys import os from demo import load_checkpoints from demo import make_animation from", "import load_checkpoints from demo import make_animation from skimage import img_as_ubyte warnings.filterwarnings(\"ignore\") if len(sys.argv)", "for frame in driving_video] generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml', checkpoint_path='vox-cpk.pth.tar') for image in list_images:", "image_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], \"images\") template_video = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1],", "predictions]) combiner = os.path.join(os.curdir, \"resources\", 
\"combos\", \"createcombo.py\") os.system(f\"python3 {combiner} {source_folder} {template_video_name} {final_vid_name} {x}", "import make_animation from skimage import img_as_ubyte warnings.filterwarnings(\"ignore\") if len(sys.argv) < 6: print(\"Usage: deepfake_multiple.py", "kp_detector = load_checkpoints(config_path='config/vox-256.yaml', checkpoint_path='vox-cpk.pth.tar') for image in list_images: image_path = os.path.join(image_folder, image) source_image", "os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], sys.argv[2]) template_video_name = sys.argv[2] gen_vid_folder = os.path.join(os.curdir, \"resources\", \"combos\",", "to 256x256 #save resulting video #predictions2 = make_animation(source_image, driving_video, generator, kp_detector, relative=False, adapt_movement_scale=True)", "os from demo import load_checkpoints from demo import make_animation from skimage import img_as_ubyte", "skimage import img_as_ubyte warnings.filterwarnings(\"ignore\") if len(sys.argv) < 6: print(\"Usage: deepfake_multiple.py <source name> <template", "f\"{gen_vid_name}_gen.mp4\" gen_vid = os.path.join(gen_vid_folder, gen_vid_name) if not os.path.exists(gen_vid): predictions = make_animation(source_image, driving_video, generator,", "as animation from skimage.transform import resize from IPython.display import HTML import warnings import", "driving_video = imageio.mimread(template_video) driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video]", "load_checkpoints(config_path='config/vox-256.yaml', checkpoint_path='vox-cpk.pth.tar') for image in list_images: image_path = os.path.join(image_folder, image) source_image = imageio.imread(image_path)", "sys.argv[2]) template_video_name = sys.argv[2] gen_vid_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], \"gen\") final_vid =", "\"images\") template_video = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], sys.argv[2]) template_video_name = sys.argv[2] 
gen_vid_folder =", "<no shuffle arg>\") sys.exit() source_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1]) image_folder = os.path.join(os.curdir,", "in driving_video] generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml', checkpoint_path='vox-cpk.pth.tar') for image in list_images: image_path =", "\"combos\", sys.argv[1]) image_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], \"images\") template_video = os.path.join(os.curdir, \"resources\",", "sys.argv[1], sys.argv[3]) final_vid_name = sys.argv[3] x = int(sys.argv[4]) y = int(sys.argv[5]) shuffle =", "final_vid = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], sys.argv[3]) final_vid_name = sys.argv[3] x = int(sys.argv[4])", "os.listdir(image_folder) driving_video = imageio.mimread(template_video) driving_video = [resize(frame, (256, 256))[..., :3] for frame in", "= resize(source_image, (256, 256))[..., :3] gen_vid_name = image.split(\".\")[0] gen_vid_name = f\"{gen_vid_name}_gen.mp4\" gen_vid =", "relative=False, adapt_movement_scale=True) #imageio.mimsave(\"testing.mp4\", [img_as_ubyte(frame) for frame in predictions2]) #os.system(f\"python3 {createvid} {template_video} {gen_vid} {final_vid}\")", "name> <template name> <final_vid_name> <rows> <columns> <no shuffle arg>\") sys.exit() source_folder = os.path.join(os.curdir,", "(256, 256))[..., :3] gen_vid_name = image.split(\".\")[0] gen_vid_name = f\"{gen_vid_name}_gen.mp4\" gen_vid = os.path.join(gen_vid_folder, gen_vid_name)", "= int(sys.argv[4]) y = int(sys.argv[5]) shuffle = \"\" if len(sys.argv) > 6: print(\"SHOULD", "image and video to 256x256 #save resulting video #predictions2 = make_animation(source_image, driving_video, generator,", "gen_vid = os.path.join(gen_vid_folder, gen_vid_name) if not os.path.exists(gen_vid): predictions = make_animation(source_image, driving_video, generator, kp_detector,", "driving_video = [resize(frame, (256, 256))[..., :3] for frame in 
driving_video] generator, kp_detector =", "\"gen\") final_vid = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], sys.argv[3]) final_vid_name = sys.argv[3] x =", "= load_checkpoints(config_path='config/vox-256.yaml', checkpoint_path='vox-cpk.pth.tar') for image in list_images: image_path = os.path.join(image_folder, image) source_image =", "len(sys.argv) < 6: print(\"Usage: deepfake_multiple.py <source name> <template name> <final_vid_name> <rows> <columns> <no", "sys.argv[3]) final_vid_name = sys.argv[3] x = int(sys.argv[4]) y = int(sys.argv[5]) shuffle = \"\"", "= os.path.join(gen_vid_folder, gen_vid_name) if not os.path.exists(gen_vid): predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=True)", "final_vid_name = sys.argv[3] x = int(sys.argv[4]) y = int(sys.argv[5]) shuffle = \"\" if", "= make_animation(source_image, driving_video, generator, kp_detector, relative=False, adapt_movement_scale=True) #imageio.mimsave(\"testing.mp4\", [img_as_ubyte(frame) for frame in predictions2])", "as plt import matplotlib.animation as animation from skimage.transform import resize from IPython.display import", "= make_animation(source_image, driving_video, generator, kp_detector, relative=True) imageio.mimsave(gen_vid, [img_as_ubyte(frame) for frame in predictions]) combiner", "HTML import warnings import sys import os from demo import load_checkpoints from demo", "\"resources\", \"combos\", sys.argv[1], \"gen\") final_vid = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], sys.argv[3]) final_vid_name =", "\"createcombo.py\") os.system(f\"python3 {combiner} {source_folder} {template_video_name} {final_vid_name} {x} {y} {shuffle}\") sys.exit() #Resize image and", "\"combos\", sys.argv[1], \"images\") template_video = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], sys.argv[2]) template_video_name = sys.argv[2]", "source_image = imageio.imread(image_path) source_image = resize(source_image, (256, 
256))[..., :3] gen_vid_name = image.split(\".\")[0] gen_vid_name", "predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=True) imageio.mimsave(gen_vid, [img_as_ubyte(frame) for frame in predictions])", "make_animation(source_image, driving_video, generator, kp_detector, relative=True) imageio.mimsave(gen_vid, [img_as_ubyte(frame) for frame in predictions]) combiner =", "import warnings import sys import os from demo import load_checkpoints from demo import", "and video to 256x256 #save resulting video #predictions2 = make_animation(source_image, driving_video, generator, kp_detector,", "#Resize image and video to 256x256 #save resulting video #predictions2 = make_animation(source_image, driving_video,", "int(sys.argv[4]) y = int(sys.argv[5]) shuffle = \"\" if len(sys.argv) > 6: print(\"SHOULD NOT", "= sys.argv[3] x = int(sys.argv[4]) y = int(sys.argv[5]) shuffle = \"\" if len(sys.argv)", "import HTML import warnings import sys import os from demo import load_checkpoints from", "import img_as_ubyte warnings.filterwarnings(\"ignore\") if len(sys.argv) < 6: print(\"Usage: deepfake_multiple.py <source name> <template name>", "= os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1]) image_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], \"images\") template_video", "\"combos\", \"createcombo.py\") os.system(f\"python3 {combiner} {source_folder} {template_video_name} {final_vid_name} {x} {y} {shuffle}\") sys.exit() #Resize image", "os.system(f\"python3 {combiner} {source_folder} {template_video_name} {final_vid_name} {x} {y} {shuffle}\") sys.exit() #Resize image and video", "sys.argv[2] gen_vid_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], \"gen\") final_vid = os.path.join(os.curdir, \"resources\", \"combos\",", "y = int(sys.argv[5]) shuffle = \"\" if len(sys.argv) > 6: print(\"SHOULD NOT CREATE", "os.path.exists(gen_vid): predictions = make_animation(source_image, 
driving_video, generator, kp_detector, relative=True) imageio.mimsave(gen_vid, [img_as_ubyte(frame) for frame in", "[img_as_ubyte(frame) for frame in predictions2]) #os.system(f\"python3 {createvid} {template_video} {gen_vid} {final_vid}\") #print(f\"VIDEO GENERATED: {final_vid}\")", "6: print(\"SHOULD NOT CREATE SHUFFLE\") shuffle=\"noshuffle\" list_images = os.listdir(image_folder) driving_video = imageio.mimread(template_video) driving_video", "SHUFFLE\") shuffle=\"noshuffle\" list_images = os.listdir(image_folder) driving_video = imageio.mimread(template_video) driving_video = [resize(frame, (256, 256))[...,", "source_image = resize(source_image, (256, 256))[..., :3] gen_vid_name = image.split(\".\")[0] gen_vid_name = f\"{gen_vid_name}_gen.mp4\" gen_vid", "len(sys.argv) > 6: print(\"SHOULD NOT CREATE SHUFFLE\") shuffle=\"noshuffle\" list_images = os.listdir(image_folder) driving_video =", "image.split(\".\")[0] gen_vid_name = f\"{gen_vid_name}_gen.mp4\" gen_vid = os.path.join(gen_vid_folder, gen_vid_name) if not os.path.exists(gen_vid): predictions =", "if not os.path.exists(gen_vid): predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=True) imageio.mimsave(gen_vid, [img_as_ubyte(frame) for", "print(\"Usage: deepfake_multiple.py <source name> <template name> <final_vid_name> <rows> <columns> <no shuffle arg>\") sys.exit()", "imageio.imread(image_path) source_image = resize(source_image, (256, 256))[..., :3] gen_vid_name = image.split(\".\")[0] gen_vid_name = f\"{gen_vid_name}_gen.mp4\"", ":3] gen_vid_name = image.split(\".\")[0] gen_vid_name = f\"{gen_vid_name}_gen.mp4\" gen_vid = os.path.join(gen_vid_folder, gen_vid_name) if not", "in predictions]) combiner = os.path.join(os.curdir, \"resources\", \"combos\", \"createcombo.py\") os.system(f\"python3 {combiner} {source_folder} {template_video_name} {final_vid_name}", "list_images = os.listdir(image_folder) driving_video = imageio.mimread(template_video) driving_video = 
[resize(frame, (256, 256))[..., :3] for", "os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], sys.argv[3]) final_vid_name = sys.argv[3] x = int(sys.argv[4]) y =", "os.path.join(gen_vid_folder, gen_vid_name) if not os.path.exists(gen_vid): predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=True) imageio.mimsave(gen_vid,", "= os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], \"gen\") final_vid = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], sys.argv[3])", "= imageio.imread(image_path) source_image = resize(source_image, (256, 256))[..., :3] gen_vid_name = image.split(\".\")[0] gen_vid_name =", "<rows> <columns> <no shuffle arg>\") sys.exit() source_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1]) image_folder", "from demo import load_checkpoints from demo import make_animation from skimage import img_as_ubyte warnings.filterwarnings(\"ignore\")", "sys.argv[1], sys.argv[2]) template_video_name = sys.argv[2] gen_vid_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], \"gen\") final_vid", "gen_vid_name = f\"{gen_vid_name}_gen.mp4\" gen_vid = os.path.join(gen_vid_folder, gen_vid_name) if not os.path.exists(gen_vid): predictions = make_animation(source_image,", "not os.path.exists(gen_vid): predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=True) imageio.mimsave(gen_vid, [img_as_ubyte(frame) for frame", "256x256 #save resulting video #predictions2 = make_animation(source_image, driving_video, generator, kp_detector, relative=False, adapt_movement_scale=True) #imageio.mimsave(\"testing.mp4\",", "skimage.transform import resize from IPython.display import HTML import warnings import sys import os", "256))[..., :3] gen_vid_name = image.split(\".\")[0] gen_vid_name = f\"{gen_vid_name}_gen.mp4\" gen_vid = os.path.join(gen_vid_folder, gen_vid_name) if", "generator, kp_detector = 
load_checkpoints(config_path='config/vox-256.yaml', checkpoint_path='vox-cpk.pth.tar') for image in list_images: image_path = os.path.join(image_folder, image)", "image) source_image = imageio.imread(image_path) source_image = resize(source_image, (256, 256))[..., :3] gen_vid_name = image.split(\".\")[0]", "\"combos\", sys.argv[1], \"gen\") final_vid = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], sys.argv[3]) final_vid_name = sys.argv[3]", "{x} {y} {shuffle}\") sys.exit() #Resize image and video to 256x256 #save resulting video", "<source name> <template name> <final_vid_name> <rows> <columns> <no shuffle arg>\") sys.exit() source_folder =", "warnings.filterwarnings(\"ignore\") if len(sys.argv) < 6: print(\"Usage: deepfake_multiple.py <source name> <template name> <final_vid_name> <rows>", "256))[..., :3] for frame in driving_video] generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml', checkpoint_path='vox-cpk.pth.tar') for image", "warnings import sys import os from demo import load_checkpoints from demo import make_animation", "= image.split(\".\")[0] gen_vid_name = f\"{gen_vid_name}_gen.mp4\" gen_vid = os.path.join(gen_vid_folder, gen_vid_name) if not os.path.exists(gen_vid): predictions", "imageio.mimread(template_video) driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video] generator, kp_detector", "{y} {shuffle}\") sys.exit() #Resize image and video to 256x256 #save resulting video #predictions2", "#predictions2 = make_animation(source_image, driving_video, generator, kp_detector, relative=False, adapt_movement_scale=True) #imageio.mimsave(\"testing.mp4\", [img_as_ubyte(frame) for frame in", "for frame in predictions]) combiner = os.path.join(os.curdir, \"resources\", \"combos\", \"createcombo.py\") os.system(f\"python3 {combiner} {source_folder}", "<columns> <no shuffle arg>\") sys.exit() source_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1]) image_folder =", "= 
f\"{gen_vid_name}_gen.mp4\" gen_vid = os.path.join(gen_vid_folder, gen_vid_name) if not os.path.exists(gen_vid): predictions = make_animation(source_image, driving_video,", "template_video = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], sys.argv[2]) template_video_name = sys.argv[2] gen_vid_folder = os.path.join(os.curdir,", "resize(source_image, (256, 256))[..., :3] gen_vid_name = image.split(\".\")[0] gen_vid_name = f\"{gen_vid_name}_gen.mp4\" gen_vid = os.path.join(gen_vid_folder,", "= int(sys.argv[5]) shuffle = \"\" if len(sys.argv) > 6: print(\"SHOULD NOT CREATE SHUFFLE\")", "kp_detector, relative=True) imageio.mimsave(gen_vid, [img_as_ubyte(frame) for frame in predictions]) combiner = os.path.join(os.curdir, \"resources\", \"combos\",", "sys.argv[1], \"images\") template_video = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], sys.argv[2]) template_video_name = sys.argv[2] gen_vid_folder", "for image in list_images: image_path = os.path.join(image_folder, image) source_image = imageio.imread(image_path) source_image =", "driving_video, generator, kp_detector, relative=False, adapt_movement_scale=True) #imageio.mimsave(\"testing.mp4\", [img_as_ubyte(frame) for frame in predictions2]) #os.system(f\"python3 {createvid}", "arg>\") sys.exit() source_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1]) image_folder = os.path.join(os.curdir, \"resources\", \"combos\",", "os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], \"images\") template_video = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], sys.argv[2]) template_video_name", "generator, kp_detector, relative=True) imageio.mimsave(gen_vid, [img_as_ubyte(frame) for frame in predictions]) combiner = os.path.join(os.curdir, \"resources\",", "deepfake_multiple.py <source name> <template name> <final_vid_name> <rows> <columns> <no shuffle arg>\") sys.exit() source_folder", "IPython.display import HTML import warnings import sys import 
os from demo import load_checkpoints", "matplotlib.pyplot as plt import matplotlib.animation as animation from skimage.transform import resize from IPython.display", "\"combos\", sys.argv[1], sys.argv[3]) final_vid_name = sys.argv[3] x = int(sys.argv[4]) y = int(sys.argv[5]) shuffle", "import os from demo import load_checkpoints from demo import make_animation from skimage import", "print(\"SHOULD NOT CREATE SHUFFLE\") shuffle=\"noshuffle\" list_images = os.listdir(image_folder) driving_video = imageio.mimread(template_video) driving_video =", "= os.path.join(image_folder, image) source_image = imageio.imread(image_path) source_image = resize(source_image, (256, 256))[..., :3] gen_vid_name", "template_video_name = sys.argv[2] gen_vid_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], \"gen\") final_vid = os.path.join(os.curdir,", "\"\" if len(sys.argv) > 6: print(\"SHOULD NOT CREATE SHUFFLE\") shuffle=\"noshuffle\" list_images = os.listdir(image_folder)", "CREATE SHUFFLE\") shuffle=\"noshuffle\" list_images = os.listdir(image_folder) driving_video = imageio.mimread(template_video) driving_video = [resize(frame, (256,", "sys import os from demo import load_checkpoints from demo import make_animation from skimage", "relative=True) imageio.mimsave(gen_vid, [img_as_ubyte(frame) for frame in predictions]) combiner = os.path.join(os.curdir, \"resources\", \"combos\", \"createcombo.py\")", "[img_as_ubyte(frame) for frame in predictions]) combiner = os.path.join(os.curdir, \"resources\", \"combos\", \"createcombo.py\") os.system(f\"python3 {combiner}", "\"resources\", \"combos\", sys.argv[1]) image_folder = os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], \"images\") template_video = os.path.join(os.curdir,", "= os.path.join(os.curdir, \"resources\", \"combos\", sys.argv[1], sys.argv[2]) template_video_name = sys.argv[2] gen_vid_folder = os.path.join(os.curdir, \"resources\"," ]
[ "language, } ) # Reset id to make sure id does not conflict", ":return: data necessary for API :rtype: dict \"\"\" return { \"id\": event.id, \"start_date\":", "temporary translations of this recurrence recurrence_translations = {} if event.region.fallback_translations_enabled: languages = event.region.active_languages", "event_translation.content, \"available_languages\": event_translation.available_languages, \"thumbnail\": event.icon.url if event.icon else None, \"location\": transform_poi(event.location, location_translation), \"event\":", "from ..decorators import json_response from .locations import transform_poi def transform_event(event): \"\"\" Function to", "event_translation.event.end_date = recurrence_date + event_length # Clear cached property in case url with", "event API endpoint. \"\"\" from copy import deepcopy from datetime import timedelta from", ") # Reset id to make sure id does not conflict with existing", "correctly listed in available languages for recurrence_translation in recurrence_translations.values(): recurrence_translation.event.prefetched_public_translations_by_language_slug = ( recurrence_translations", "generate_unique_slug( **{ \"slug\": f\"{current_slug}-{recurrence_date}\", \"manager\": EventTranslation.objects, \"object_instance\": event_translation, \"foreign_model\": \"event\", \"region\": event.region, \"language\":", "in transform_event_recurrences(event_translation, now): result.append(future_event) return JsonResponse( result, safe=False ) # Turn off Safe-Mode", "``settings.API_EVENTS_MAX_TIME_SPAN_DAYS`` :rtype: Iterator[:class:`~datetime.date`] \"\"\" event = event_translation.event recurrence_rule = event.recurrence_rule if not recurrence_rule:", "+ absolute_url, \"path\": absolute_url, \"title\": event_translation.title, \"modified_gmt\": event_translation.last_updated.strftime(\"%Y-%m-%d %H:%M:%S\"), \"excerpt\": strip_tags(event_translation.content), \"content\": event_translation.content,", "del 
event_translation.url_prefix except AttributeError: pass recurrence_translations[language.slug] = event_translation # Set the prefetched public", "import deepcopy from datetime import timedelta from django.conf import settings from django.http import", "): return event_length = event.end_date - event.start_date start_date = event.start_date event_translation.id = None", "event_translation = event.get_public_translation(language_slug) if event_translation: if event.end_date >= now: result.append(transform_event_translation(event_translation)) for future_event in", "return event_length = event.end_date - event.start_date start_date = event.start_date event_translation.id = None #", "\"hash\": None, } def transform_event_recurrences(event_translation, today): \"\"\" Yield all future recurrences of the", "AttributeError: pass yield transform_event_translation(event_translation) @json_response # pylint: disable=unused-argument def events(request, region_slug, language_slug): \"\"\"", "def events(request, region_slug, language_slug): \"\"\" List all events of the region and transform", "result.append(future_event) return JsonResponse( result, safe=False ) # Turn off Safe-Mode to allow serializing", "= language event_translation.slug = generate_unique_slug( **{ \"slug\": f\"{current_slug}-{recurrence_date}\", \"manager\": EventTranslation.objects, \"object_instance\": event_translation, \"foreign_model\":", "sure original translation is not affected by changes event_translation = deepcopy(event_translation) # Fake", "language_slug): \"\"\" List all events of the region and transform result into JSON", "event_translation.available_languages, \"thumbnail\": event.icon.url if event.icon else None, \"location\": transform_poi(event.location, location_translation), \"event\": transform_event(event), \"hash\":", "\"timezone\": settings.CURRENT_TIME_ZONE, } def transform_event_translation(event_translation): \"\"\" Function to create a JSON from a", "for event in 
region.events.prefetch_public_translations().filter(archived=False): event_translation = event.get_public_translation(language_slug) if event_translation: if event.end_date >= now:", "to recurrence date event_translation.event.start_date = recurrence_date event_translation.event.end_date = recurrence_date + event_length # Clear", "be converted :type event_translation: ~integreat_cms.cms.models.events.event_translation.EventTranslation :return: data necessary for API :rtype: dict \"\"\"", "or recurrence_date == start_date: continue # Create all temporary translations of this recurrence", "# pylint: disable=unused-argument def events(request, region_slug, language_slug): \"\"\" List all events of the", "Clear cached property in case url with different language was already calculated before", "up to ``settings.API_EVENTS_MAX_TIME_SPAN_DAYS`` :rtype: Iterator[:class:`~datetime.date`] \"\"\" event = event_translation.event recurrence_rule = event.recurrence_rule if", "today: The first date at which event may be yielded :type today: ~datetime.date", "event: The event which should be converted :type event: ~integreat_cms.cms.models.events.event.Event :return: data necessary", ":type language_slug: str :return: JSON object according to APIv3 events endpoint definition :rtype:", "of the event. 
:param event_translation: The event translation object which should be converted", "from django.conf import settings from django.http import JsonResponse from django.utils import timezone from", "\"available_languages\": event_translation.available_languages, \"thumbnail\": event.icon.url if event.icon else None, \"location\": transform_poi(event.location, location_translation), \"event\": transform_event(event),", "event translation event_translation.event.id = None # Set date to recurrence date event_translation.event.start_date =", "\"manager\": EventTranslation.objects, \"object_instance\": event_translation, \"foreign_model\": \"event\", \"region\": event.region, \"language\": language, } ) #", "transform_poi def transform_event(event): \"\"\" Function to create a JSON from a single event", "Iterator[:class:`~datetime.date`] \"\"\" event = event_translation.event recurrence_rule = event.recurrence_rule if not recurrence_rule: return #", "already calculated before try: del event_translation.url_prefix except AttributeError: pass recurrence_translations[language.slug] = event_translation #", "return # In order to avoid unnecessary computations, check if any future event", "region_slug: The slug of the requested region :type region_slug: str :param language_slug: The", "should be converted :type event_translation: ~integreat_cms.cms.models.events.event_translation.EventTranslation :return: data necessary for API :rtype: dict", "converted :type event: ~integreat_cms.cms.models.events.event.Event :return: data necessary for API :rtype: dict \"\"\" return", "transform result into JSON :param request: The current request :type request: ~django.http.HttpRequest :param", "from django.http import JsonResponse from django.utils import timezone from django.utils.html import strip_tags from", "\"start_date\": event.start_date, \"end_date\": event.end_date, \"all_day\": event.is_all_day, \"start_time\": event.start_time, \"end_time\": event.end_time, \"recurrence_id\": 
event.recurrence_rule.id if", "Fake the requested language event_translation.language = language event_translation.slug = generate_unique_slug( **{ \"slug\": f\"{current_slug}-{recurrence_date}\",", "Yield all future recurrences of the event. :param event_translation: The event translation object", "= timezone.now().date() for event in region.events.prefetch_public_translations().filter(archived=False): event_translation = event.get_public_translation(language_slug) if event_translation: if event.end_date", "calculated before try: del event_translation.url_prefix except AttributeError: pass recurrence_translations[language.slug] = event_translation # Set", "today ): return event_length = event.end_date - event.start_date start_date = event.start_date event_translation.id =", "yielded :type today: ~datetime.date :return: An iterator over all future recurrences up to", "recurrences up to ``settings.API_EVENTS_MAX_TIME_SPAN_DAYS`` :rtype: Iterator[:class:`~datetime.date`] \"\"\" event = event_translation.event recurrence_rule = event.recurrence_rule", "of this event for recurrence_date in recurrence_rule.iter_after(start_date): if recurrence_date - max(start_date, today) >", "location_translation = None absolute_url = event_translation.get_absolute_url() return { \"id\": event_translation.id, \"url\": settings.BASE_URL +", "= ( event.location.get_public_translation(event_translation.language.slug) or event.location.best_translation ) else: location_translation = None absolute_url = event_translation.get_absolute_url()", "region and transform result into JSON :param request: The current request :type request:", "slug of the requested region :type region_slug: str :param language_slug: The slug of", "transform_event_recurrences(event_translation, now): result.append(future_event) return JsonResponse( result, safe=False ) # Turn off Safe-Mode to", "} def transform_event_translation(event_translation): \"\"\" Function to create a JSON from a single event_translation", 
def transform_event(event):
    """
    Serialize a single event object into its API representation.

    :param event: The event which should be converted
    :type event: ~integreat_cms.cms.models.events.event.Event

    :return: data necessary for API
    :rtype: dict
    """
    # The recurrence rule is optional, so guard the id lookup before building the payload.
    recurrence_id = event.recurrence_rule.id if event.recurrence_rule else None
    return {
        "id": event.id,
        "start_date": event.start_date,
        "end_date": event.end_date,
        "all_day": event.is_all_day,
        "start_time": event.start_time,
        "end_time": event.end_time,
        "recurrence_id": recurrence_id,
        "timezone": settings.CURRENT_TIME_ZONE,
    }
def transform_event_translation(event_translation):
    """
    Serialize a single event_translation object into its API representation.

    :param event_translation: The event translation object which should be converted
    :type event_translation: ~integreat_cms.cms.models.events.event_translation.EventTranslation

    :return: data necessary for API
    :rtype: dict
    """
    event = event_translation.event
    # Resolve the location translation for the requested language, falling back
    # to the best available translation; stays None when the event has no POI.
    location_translation = None
    if event.location:
        location_translation = (
            event.location.get_public_translation(event_translation.language.slug)
            or event.location.best_translation
        )
    absolute_url = event_translation.get_absolute_url()
    return {
        "id": event_translation.id,
        "url": settings.BASE_URL + absolute_url,
        "path": absolute_url,
        "title": event_translation.title,
        "modified_gmt": event_translation.last_updated.strftime("%Y-%m-%d %H:%M:%S"),
        "excerpt": strip_tags(event_translation.content),
        "content": event_translation.content,
        "available_languages": event_translation.available_languages,
        "thumbnail": event.icon.url if event.icon else None,
        "location": transform_poi(event.location, location_translation),
        "event": transform_event(event),
        "hash": None,
    }
def transform_event_recurrences(event_translation, today):
    """
    Yield all future recurrences of the event.

    Each yielded item is the API dict of a *virtual* event translation: a
    deep copy of ``event_translation`` shifted to the recurrence date, with a
    derived slug and a reset event id so it cannot collide with the stored
    translation.

    :param event_translation: The event translation object which should be converted
    :type event_translation: ~integreat_cms.cms.models.events.event_translation.EventTranslation

    :param today: The first date at which event may be yielded
    :type today: ~datetime.date

    :return: An iterator over all future recurrences up to ``settings.API_EVENTS_MAX_TIME_SPAN_DAYS``
    :rtype: Iterator[:class:`~datetime.date`]
    """
    event = event_translation.event
    recurrence_rule = event.recurrence_rule
    # Non-recurring events have nothing to yield
    if not recurrence_rule:
        return

    # In order to avoid unnecessary computations, check if any future event
    # may be valid and return early if that is not the case
    if (
        recurrence_rule.recurrence_end_date
        and recurrence_rule.recurrence_end_date < today
    ):
        return

    # Duration of a single occurrence, applied to every recurrence date below
    event_length = event.end_date - event.start_date
    start_date = event.start_date
    # Drop the translation's database id so the copies below represent
    # virtual (unsaved) objects rather than the stored row
    event_translation.id = None
    # Store language and slug for usage in loop (the loop reassigns
    # ``event_translation``, so the originals must be captured first)
    current_language = event_translation.language
    current_slug = event_translation.slug
    # Calculate all recurrences of this event
    for recurrence_date in recurrence_rule.iter_after(start_date):
        # Stop once the recurrence lies further than the configured maximum
        # time span beyond the later of the event start and today
        if recurrence_date - max(start_date, today) > timedelta(
            days=settings.API_EVENTS_MAX_TIME_SPAN_DAYS
        ):
            break
        # Skip past dates and the initial occurrence (already listed directly)
        if recurrence_date < today or recurrence_date == start_date:
            continue

        # Create all temporary translations of this recurrence
        recurrence_translations = {}
        if event.region.fallback_translations_enabled:
            languages = event.region.active_languages
        else:
            languages = event.public_languages
        for language in languages:
            # Create copy in memory to make sure original translation is not affected by changes
            event_translation = deepcopy(event_translation)
            # Fake the requested language
            event_translation.language = language
            event_translation.slug = generate_unique_slug(
                **{
                    "slug": f"{current_slug}-{recurrence_date}",
                    "manager": EventTranslation.objects,
                    "object_instance": event_translation,
                    "foreign_model": "event",
                    "region": event.region,
                    "language": language,
                }
            )
            # Reset id to make sure id does not conflict with existing event translation
            event_translation.event.id = None
            # Set date to recurrence date
            event_translation.event.start_date = recurrence_date
            event_translation.event.end_date = recurrence_date + event_length
            # Clear cached property in case url with different language was
            # already calculated before (``url_prefix`` is presumably a
            # ``cached_property`` on the translation — AttributeError means it
            # was simply never computed yet)
            try:
                del event_translation.url_prefix
            except AttributeError:
                pass
            recurrence_translations[language.slug] = event_translation

        # Set the prefetched public translations to make sure the recurrence
        # translations are correctly listed in available languages
        for recurrence_translation in recurrence_translations.values():
            recurrence_translation.event.prefetched_public_translations_by_language_slug = (
                recurrence_translations
            )

        # Update translation object with the one with prefetched temporary translations
        event_translation = recurrence_translations[current_language.slug]
        # Clear cached property in case available languages with different
        # recurrence was already calculated before
        try:
            del event_translation.available_languages
        except AttributeError:
            pass

        yield transform_event_translation(event_translation)
@json_response
# pylint: disable=unused-argument
def events(request, region_slug, language_slug):
    """
    List all events of the region and transform result into JSON

    :param request: The current request
    :type request: ~django.http.HttpRequest

    :param region_slug: The slug of the requested region
    :type region_slug: str

    :param language_slug: The slug of the requested language
    :type language_slug: str

    :return: JSON object according to APIv3 events endpoint definition
    :rtype: ~django.http.JsonResponse
    """
    region = request.region
    # Throw a 404 error when the language does not exist or is disabled
    region.get_language_or_404(language_slug, only_active=True)
    today = timezone.now().date()
    payload = []
    for event in region.events.prefetch_public_translations().filter(archived=False):
        translation = event.get_public_translation(language_slug)
        if not translation:
            continue
        # Events which have not ended yet are listed directly
        if event.end_date >= today:
            payload.append(transform_event_translation(translation))
        # Also list every upcoming recurrence as a virtual event
        payload.extend(transform_event_recurrences(translation, today))
    # Turn off Safe-Mode to allow serializing arrays
    return JsonResponse(payload, safe=False)
:param event: The event which", "event_translation.get_absolute_url() return { \"id\": event_translation.id, \"url\": settings.BASE_URL + absolute_url, \"path\": absolute_url, \"title\": event_translation.title,", "= ( recurrence_translations ) # Update translation object with the one with prefetched", "in region.events.prefetch_public_translations().filter(archived=False): event_translation = event.get_public_translation(language_slug) if event_translation: if event.end_date >= now: result.append(transform_event_translation(event_translation)) for", "\"\"\" return { \"id\": event.id, \"start_date\": event.start_date, \"end_date\": event.end_date, \"all_day\": event.is_all_day, \"start_time\": event.start_time,", "recurrence_translations.values(): recurrence_translation.event.prefetched_public_translations_by_language_slug = ( recurrence_translations ) # Update translation object with the one", "None absolute_url = event_translation.get_absolute_url() return { \"id\": event_translation.id, \"url\": settings.BASE_URL + absolute_url, \"path\":", ":type event_translation: ~integreat_cms.cms.models.events.event_translation.EventTranslation :param today: The first date at which event may be", "temporary translations event_translation = recurrence_translations[current_language.slug] # Clear cached property in case available languages", "recurrence_translation.event.prefetched_public_translations_by_language_slug = ( recurrence_translations ) # Update translation object with the one with", "when the language does not exist or is disabled region.get_language_or_404(language_slug, only_active=True) result =", "different language was already calculated before try: del event_translation.url_prefix except AttributeError: pass recurrence_translations[language.slug]", "from django.utils import timezone from django.utils.html import strip_tags from ...cms.models.events.event_translation import EventTranslation from", "object. 
:param event_translation: The event translation object which should be converted :type event_translation:", "of the requested region :type region_slug: str :param language_slug: The slug of the", "computations, check if any future event # may be valid and return early", "only_active=True) result = [] now = timezone.now().date() for event in region.events.prefetch_public_translations().filter(archived=False): event_translation =", "for recurrence_translation in recurrence_translations.values(): recurrence_translation.event.prefetched_public_translations_by_language_slug = ( recurrence_translations ) # Update translation object", "= event.recurrence_rule if not recurrence_rule: return # In order to avoid unnecessary computations,", "= recurrence_translations[current_language.slug] # Clear cached property in case available languages with different recurrence", "the prefetched public translations to make sure the recurrence translations are correctly listed", "future recurrences up to ``settings.API_EVENTS_MAX_TIME_SPAN_DAYS`` :rtype: Iterator[:class:`~datetime.date`] \"\"\" event = event_translation.event recurrence_rule =", "property in case url with different language was already calculated before try: del", "not exist or is disabled region.get_language_or_404(language_slug, only_active=True) result = [] now = timezone.now().date()", "= recurrence_date + event_length # Clear cached property in case url with different", "event.end_time, \"recurrence_id\": event.recurrence_rule.id if event.recurrence_rule else None, \"timezone\": settings.CURRENT_TIME_ZONE, } def transform_event_translation(event_translation): \"\"\"", "recurrence_rule.iter_after(start_date): if recurrence_date - max(start_date, today) > timedelta( days=settings.API_EVENTS_MAX_TIME_SPAN_DAYS ): break if recurrence_date", "event = event_translation.event if event.location: location_translation = ( event.location.get_public_translation(event_translation.language.slug) or event.location.best_translation 
) else:", "valid and return early if that is not the case if ( recurrence_rule.recurrence_end_date", "recurrence_rule.recurrence_end_date and recurrence_rule.recurrence_end_date < today ): return event_length = event.end_date - event.start_date start_date", "# Store language and slug for usage in loop current_language = event_translation.language current_slug", "request: ~django.http.HttpRequest :param region_slug: The slug of the requested region :type region_slug: str", "event translation object which should be converted :type event_translation: ~integreat_cms.cms.models.events.event_translation.EventTranslation :param today: The", "EventTranslation from ...cms.utils.slug_utils import generate_unique_slug from ..decorators import json_response from .locations import transform_poi", "existing event translation event_translation.event.id = None # Set date to recurrence date event_translation.event.start_date", "Reset id to make sure id does not conflict with existing event translation", "necessary for API :rtype: dict \"\"\" event = event_translation.event if event.location: location_translation =", "this event for recurrence_date in recurrence_rule.iter_after(start_date): if recurrence_date - max(start_date, today) > timedelta(", "event.region, \"language\": language, } ) # Reset id to make sure id does", "\"\"\" event = event_translation.event recurrence_rule = event.recurrence_rule if not recurrence_rule: return # In", "all future recurrences up to ``settings.API_EVENTS_MAX_TIME_SPAN_DAYS`` :rtype: Iterator[:class:`~datetime.date`] \"\"\" event = event_translation.event recurrence_rule", "if event.region.fallback_translations_enabled: languages = event.region.active_languages else: languages = event.public_languages for language in languages:", "( recurrence_rule.recurrence_end_date and recurrence_rule.recurrence_end_date < today ): return event_length = event.end_date - event.start_date", "to make sure id does not conflict with existing event translation 
event_translation.event.id =", "event_translation.available_languages except AttributeError: pass yield transform_event_translation(event_translation) @json_response # pylint: disable=unused-argument def events(request, region_slug,", "from copy import deepcopy from datetime import timedelta from django.conf import settings from", "yield transform_event_translation(event_translation) @json_response # pylint: disable=unused-argument def events(request, region_slug, language_slug): \"\"\" List all", "before try: del event_translation.url_prefix except AttributeError: pass recurrence_translations[language.slug] = event_translation # Set the", "translation object which should be converted :type event_translation: ~integreat_cms.cms.models.events.event_translation.EventTranslation :return: data necessary for", "import JsonResponse from django.utils import timezone from django.utils.html import strip_tags from ...cms.models.events.event_translation import", "= event.get_public_translation(language_slug) if event_translation: if event.end_date >= now: result.append(transform_event_translation(event_translation)) for future_event in transform_event_recurrences(event_translation,", ":rtype: ~django.http.JsonResponse \"\"\" region = request.region # Throw a 404 error when the", "404 error when the language does not exist or is disabled region.get_language_or_404(language_slug, only_active=True)", "event translation object which should be converted :type event_translation: ~integreat_cms.cms.models.events.event_translation.EventTranslation :return: data necessary", "event.region.active_languages else: languages = event.public_languages for language in languages: # Create copy in", "event.end_date, \"all_day\": event.is_all_day, \"start_time\": event.start_time, \"end_time\": event.end_time, \"recurrence_id\": event.recurrence_rule.id if event.recurrence_rule else None,", "sure id does not conflict with existing event translation event_translation.event.id = None #", "into 
JSON :param request: The current request :type request: ~django.http.HttpRequest :param region_slug: The", "module includes functions related to the event API endpoint. \"\"\" from copy import", "event.recurrence_rule if not recurrence_rule: return # In order to avoid unnecessary computations, check", "not the case if ( recurrence_rule.recurrence_end_date and recurrence_rule.recurrence_end_date < today ): return event_length", "object according to APIv3 events endpoint definition :rtype: ~django.http.JsonResponse \"\"\" region = request.region", "error when the language does not exist or is disabled region.get_language_or_404(language_slug, only_active=True) result", "# Create all temporary translations of this recurrence recurrence_translations = {} if event.region.fallback_translations_enabled:", "event_translation.id = None # Store language and slug for usage in loop current_language", "which should be converted :type event_translation: ~integreat_cms.cms.models.events.event_translation.EventTranslation :return: data necessary for API :rtype:", "event.public_languages for language in languages: # Create copy in memory to make sure", "event_translation.language current_slug = event_translation.slug # Calculate all recurrences of this event for recurrence_date", "recurrence_date == start_date: continue # Create all temporary translations of this recurrence recurrence_translations", "may be valid and return early if that is not the case if", "the language does not exist or is disabled region.get_language_or_404(language_slug, only_active=True) result = []", "timedelta from django.conf import settings from django.http import JsonResponse from django.utils import timezone", "Function to create a JSON from a single event object. 
:param event: The", "current_slug = event_translation.slug # Calculate all recurrences of this event for recurrence_date in", "\"\"\" region = request.region # Throw a 404 error when the language does", "for API :rtype: dict \"\"\" return { \"id\": event.id, \"start_date\": event.start_date, \"end_date\": event.end_date,", "should be converted :type event_translation: ~integreat_cms.cms.models.events.event_translation.EventTranslation :param today: The first date at which", "event_translation.last_updated.strftime(\"%Y-%m-%d %H:%M:%S\"), \"excerpt\": strip_tags(event_translation.content), \"content\": event_translation.content, \"available_languages\": event_translation.available_languages, \"thumbnail\": event.icon.url if event.icon else", "} ) # Reset id to make sure id does not conflict with", "str :return: JSON object according to APIv3 events endpoint definition :rtype: ~django.http.JsonResponse \"\"\"", "In order to avoid unnecessary computations, check if any future event # may", "if event.location: location_translation = ( event.location.get_public_translation(event_translation.language.slug) or event.location.best_translation ) else: location_translation = None", "future recurrences of the event. 
:param event_translation: The event translation object which should", "event.icon else None, \"location\": transform_poi(event.location, location_translation), \"event\": transform_event(event), \"hash\": None, } def transform_event_recurrences(event_translation,", "event: ~integreat_cms.cms.models.events.event.Event :return: data necessary for API :rtype: dict \"\"\" return { \"id\":", "import transform_poi def transform_event(event): \"\"\" Function to create a JSON from a single", "event.recurrence_rule else None, \"timezone\": settings.CURRENT_TIME_ZONE, } def transform_event_translation(event_translation): \"\"\" Function to create a", "event.location.best_translation ) else: location_translation = None absolute_url = event_translation.get_absolute_url() return { \"id\": event_translation.id,", "in recurrence_rule.iter_after(start_date): if recurrence_date - max(start_date, today) > timedelta( days=settings.API_EVENTS_MAX_TIME_SPAN_DAYS ): break if", "exist or is disabled region.get_language_or_404(language_slug, only_active=True) result = [] now = timezone.now().date() for", "with different recurrence was already calculated before try: del event_translation.available_languages except AttributeError: pass", "= event_translation.language current_slug = event_translation.slug # Calculate all recurrences of this event for", "if event.recurrence_rule else None, \"timezone\": settings.CURRENT_TIME_ZONE, } def transform_event_translation(event_translation): \"\"\" Function to create", "deepcopy from datetime import timedelta from django.conf import settings from django.http import JsonResponse", "Create all temporary translations of this recurrence recurrence_translations = {} if event.region.fallback_translations_enabled: languages", "import strip_tags from ...cms.models.events.event_translation import EventTranslation from ...cms.utils.slug_utils import generate_unique_slug from ..decorators import", "The current request :type request: ~django.http.HttpRequest 
:param region_slug: The slug of the requested", "\"\"\" Function to create a JSON from a single event_translation object. :param event_translation:", "copy import deepcopy from datetime import timedelta from django.conf import settings from django.http", "languages for recurrence_translation in recurrence_translations.values(): recurrence_translation.event.prefetched_public_translations_by_language_slug = ( recurrence_translations ) # Update translation", ") else: location_translation = None absolute_url = event_translation.get_absolute_url() return { \"id\": event_translation.id, \"url\":", "the event API endpoint. \"\"\" from copy import deepcopy from datetime import timedelta", "transform_event(event), \"hash\": None, } def transform_event_recurrences(event_translation, today): \"\"\" Yield all future recurrences of", "break if recurrence_date < today or recurrence_date == start_date: continue # Create all", "recurrence_date event_translation.event.end_date = recurrence_date + event_length # Clear cached property in case url", "event_translation.event if event.location: location_translation = ( event.location.get_public_translation(event_translation.language.slug) or event.location.best_translation ) else: location_translation =", "~django.http.HttpRequest :param region_slug: The slug of the requested region :type region_slug: str :param", "The event which should be converted :type event: ~integreat_cms.cms.models.events.event.Event :return: data necessary for", "for usage in loop current_language = event_translation.language current_slug = event_translation.slug # Calculate all", "if any future event # may be valid and return early if that", "to ``settings.API_EVENTS_MAX_TIME_SPAN_DAYS`` :rtype: Iterator[:class:`~datetime.date`] \"\"\" event = event_translation.event recurrence_rule = event.recurrence_rule if not", "converted :type event_translation: ~integreat_cms.cms.models.events.event_translation.EventTranslation :return: data necessary for API :rtype: dict 
\"\"\" event", "event.region.fallback_translations_enabled: languages = event.region.active_languages else: languages = event.public_languages for language in languages: #", "event_translation object. :param event_translation: The event translation object which should be converted :type", "AttributeError: pass recurrence_translations[language.slug] = event_translation # Set the prefetched public translations to make", "recurrence recurrence_translations = {} if event.region.fallback_translations_enabled: languages = event.region.active_languages else: languages = event.public_languages", "to the event API endpoint. \"\"\" from copy import deepcopy from datetime import", "case if ( recurrence_rule.recurrence_end_date and recurrence_rule.recurrence_end_date < today ): return event_length = event.end_date", "event. :param event_translation: The event translation object which should be converted :type event_translation:", "a single event_translation object. :param event_translation: The event translation object which should be", "loop current_language = event_translation.language current_slug = event_translation.slug # Calculate all recurrences of this", "result.append(transform_event_translation(event_translation)) for future_event in transform_event_recurrences(event_translation, now): result.append(future_event) return JsonResponse( result, safe=False ) #", "= None # Store language and slug for usage in loop current_language =", "in case url with different language was already calculated before try: del event_translation.url_prefix", "language_slug: str :return: JSON object according to APIv3 events endpoint definition :rtype: ~django.http.JsonResponse", "transform_event(event): \"\"\" Function to create a JSON from a single event object. 
:param", "if ( recurrence_rule.recurrence_end_date and recurrence_rule.recurrence_end_date < today ): return event_length = event.end_date -", "or is disabled region.get_language_or_404(language_slug, only_active=True) result = [] now = timezone.now().date() for event", "current request :type request: ~django.http.HttpRequest :param region_slug: The slug of the requested region", "event_translation: ~integreat_cms.cms.models.events.event_translation.EventTranslation :param today: The first date at which event may be yielded", "\"\"\" This module includes functions related to the event API endpoint. \"\"\" from", ":return: JSON object according to APIv3 events endpoint definition :rtype: ~django.http.JsonResponse \"\"\" region", "= request.region # Throw a 404 error when the language does not exist", "all events of the region and transform result into JSON :param request: The", "absolute_url = event_translation.get_absolute_url() return { \"id\": event_translation.id, \"url\": settings.BASE_URL + absolute_url, \"path\": absolute_url,", "~integreat_cms.cms.models.events.event.Event :return: data necessary for API :rtype: dict \"\"\" return { \"id\": event.id,", "...cms.utils.slug_utils import generate_unique_slug from ..decorators import json_response from .locations import transform_poi def transform_event(event):", "result = [] now = timezone.now().date() for event in region.events.prefetch_public_translations().filter(archived=False): event_translation = event.get_public_translation(language_slug)", "} def transform_event_recurrences(event_translation, today): \"\"\" Yield all future recurrences of the event. 
:param", "\"title\": event_translation.title, \"modified_gmt\": event_translation.last_updated.strftime(\"%Y-%m-%d %H:%M:%S\"), \"excerpt\": strip_tags(event_translation.content), \"content\": event_translation.content, \"available_languages\": event_translation.available_languages, \"thumbnail\": event.icon.url", "= generate_unique_slug( **{ \"slug\": f\"{current_slug}-{recurrence_date}\", \"manager\": EventTranslation.objects, \"object_instance\": event_translation, \"foreign_model\": \"event\", \"region\": event.region,", "events endpoint definition :rtype: ~django.http.JsonResponse \"\"\" region = request.region # Throw a 404", "return { \"id\": event.id, \"start_date\": event.start_date, \"end_date\": event.end_date, \"all_day\": event.is_all_day, \"start_time\": event.start_time, \"end_time\":", "and transform result into JSON :param request: The current request :type request: ~django.http.HttpRequest", "id does not conflict with existing event translation event_translation.event.id = None # Set", "{ \"id\": event_translation.id, \"url\": settings.BASE_URL + absolute_url, \"path\": absolute_url, \"title\": event_translation.title, \"modified_gmt\": event_translation.last_updated.strftime(\"%Y-%m-%d", "= event_translation.slug # Calculate all recurrences of this event for recurrence_date in recurrence_rule.iter_after(start_date):", "future_event in transform_event_recurrences(event_translation, now): result.append(future_event) return JsonResponse( result, safe=False ) # Turn off", "affected by changes event_translation = deepcopy(event_translation) # Fake the requested language event_translation.language =", "recurrences of the event. 
:param event_translation: The event translation object which should be", "over all future recurrences up to ``settings.API_EVENTS_MAX_TIME_SPAN_DAYS`` :rtype: Iterator[:class:`~datetime.date`] \"\"\" event = event_translation.event", "translation object with the one with prefetched temporary translations event_translation = recurrence_translations[current_language.slug] #", "**{ \"slug\": f\"{current_slug}-{recurrence_date}\", \"manager\": EventTranslation.objects, \"object_instance\": event_translation, \"foreign_model\": \"event\", \"region\": event.region, \"language\": language,", ":rtype: dict \"\"\" return { \"id\": event.id, \"start_date\": event.start_date, \"end_date\": event.end_date, \"all_day\": event.is_all_day,", "not recurrence_rule: return # In order to avoid unnecessary computations, check if any", "> timedelta( days=settings.API_EVENTS_MAX_TIME_SPAN_DAYS ): break if recurrence_date < today or recurrence_date == start_date:", "- event.start_date start_date = event.start_date event_translation.id = None # Store language and slug", ":type event: ~integreat_cms.cms.models.events.event.Event :return: data necessary for API :rtype: dict \"\"\" return {", "API endpoint. \"\"\" from copy import deepcopy from datetime import timedelta from django.conf", "today): \"\"\" Yield all future recurrences of the event. 
:param event_translation: The event", "requested language event_translation.language = language event_translation.slug = generate_unique_slug( **{ \"slug\": f\"{current_slug}-{recurrence_date}\", \"manager\": EventTranslation.objects,", "region = request.region # Throw a 404 error when the language does not", "of the requested language :type language_slug: str :return: JSON object according to APIv3", "# Update translation object with the one with prefetched temporary translations event_translation =", "make sure id does not conflict with existing event translation event_translation.event.id = None", "recurrence_translations = {} if event.region.fallback_translations_enabled: languages = event.region.active_languages else: languages = event.public_languages for", "language in languages: # Create copy in memory to make sure original translation", "with existing event translation event_translation.event.id = None # Set date to recurrence date", "from a single event_translation object. :param event_translation: The event translation object which should", "timedelta( days=settings.API_EVENTS_MAX_TIME_SPAN_DAYS ): break if recurrence_date < today or recurrence_date == start_date: continue", "\"recurrence_id\": event.recurrence_rule.id if event.recurrence_rule else None, \"timezone\": settings.CURRENT_TIME_ZONE, } def transform_event_translation(event_translation): \"\"\" Function", "event_translation: The event translation object which should be converted :type event_translation: ~integreat_cms.cms.models.events.event_translation.EventTranslation :param", "transform_event_recurrences(event_translation, today): \"\"\" Yield all future recurrences of the event. 
:param event_translation: The", "today or recurrence_date == start_date: continue # Create all temporary translations of this", "date at which event may be yielded :type today: ~datetime.date :return: An iterator", "- max(start_date, today) > timedelta( days=settings.API_EVENTS_MAX_TIME_SPAN_DAYS ): break if recurrence_date < today or", "django.utils import timezone from django.utils.html import strip_tags from ...cms.models.events.event_translation import EventTranslation from ...cms.utils.slug_utils", "the requested language :type language_slug: str :return: JSON object according to APIv3 events", "None # Store language and slug for usage in loop current_language = event_translation.language", "is not affected by changes event_translation = deepcopy(event_translation) # Fake the requested language", "today) > timedelta( days=settings.API_EVENTS_MAX_TIME_SPAN_DAYS ): break if recurrence_date < today or recurrence_date ==", "# Fake the requested language event_translation.language = language event_translation.slug = generate_unique_slug( **{ \"slug\":", "except AttributeError: pass yield transform_event_translation(event_translation) @json_response # pylint: disable=unused-argument def events(request, region_slug, language_slug):", "\"start_time\": event.start_time, \"end_time\": event.end_time, \"recurrence_id\": event.recurrence_rule.id if event.recurrence_rule else None, \"timezone\": settings.CURRENT_TIME_ZONE, }", "return early if that is not the case if ( recurrence_rule.recurrence_end_date and recurrence_rule.recurrence_end_date", "transform_event_translation(event_translation): \"\"\" Function to create a JSON from a single event_translation object. 
:param", "was already calculated before try: del event_translation.available_languages except AttributeError: pass yield transform_event_translation(event_translation) @json_response", "else None, \"location\": transform_poi(event.location, location_translation), \"event\": transform_event(event), \"hash\": None, } def transform_event_recurrences(event_translation, today):", "for recurrence_date in recurrence_rule.iter_after(start_date): if recurrence_date - max(start_date, today) > timedelta( days=settings.API_EVENTS_MAX_TIME_SPAN_DAYS ):", "generate_unique_slug from ..decorators import json_response from .locations import transform_poi def transform_event(event): \"\"\" Function", "related to the event API endpoint. \"\"\" from copy import deepcopy from datetime", "if event_translation: if event.end_date >= now: result.append(transform_event_translation(event_translation)) for future_event in transform_event_recurrences(event_translation, now): result.append(future_event)", "translations are correctly listed in available languages for recurrence_translation in recurrence_translations.values(): recurrence_translation.event.prefetched_public_translations_by_language_slug =", "strip_tags from ...cms.models.events.event_translation import EventTranslation from ...cms.utils.slug_utils import generate_unique_slug from ..decorators import json_response", "region.events.prefetch_public_translations().filter(archived=False): event_translation = event.get_public_translation(language_slug) if event_translation: if event.end_date >= now: result.append(transform_event_translation(event_translation)) for future_event", "should be converted :type event: ~integreat_cms.cms.models.events.event.Event :return: data necessary for API :rtype: dict", "make sure the recurrence translations are correctly listed in available languages for recurrence_translation", "object. 
:param event: The event which should be converted :type event: ~integreat_cms.cms.models.events.event.Event :return:", "does not exist or is disabled region.get_language_or_404(language_slug, only_active=True) result = [] now =", "event # may be valid and return early if that is not the", "~django.http.JsonResponse \"\"\" region = request.region # Throw a 404 error when the language", "= [] now = timezone.now().date() for event in region.events.prefetch_public_translations().filter(archived=False): event_translation = event.get_public_translation(language_slug) if", "to make sure the recurrence translations are correctly listed in available languages for", "the one with prefetched temporary translations event_translation = recurrence_translations[current_language.slug] # Clear cached property", "is not the case if ( recurrence_rule.recurrence_end_date and recurrence_rule.recurrence_end_date < today ): return", "already calculated before try: del event_translation.available_languages except AttributeError: pass yield transform_event_translation(event_translation) @json_response #", "case url with different language was already calculated before try: del event_translation.url_prefix except", "event_translation.event recurrence_rule = event.recurrence_rule if not recurrence_rule: return # In order to avoid", "{ \"id\": event.id, \"start_date\": event.start_date, \"end_date\": event.end_date, \"all_day\": event.is_all_day, \"start_time\": event.start_time, \"end_time\": event.end_time,", "An iterator over all future recurrences up to ``settings.API_EVENTS_MAX_TIME_SPAN_DAYS`` :rtype: Iterator[:class:`~datetime.date`] \"\"\" event", "prefetched public translations to make sure the recurrence translations are correctly listed in", ">= now: result.append(transform_event_translation(event_translation)) for future_event in transform_event_recurrences(event_translation, now): result.append(future_event) return JsonResponse( result, safe=False", "request :type request: 
~django.http.HttpRequest :param region_slug: The slug of the requested region :type", "disabled region.get_language_or_404(language_slug, only_active=True) result = [] now = timezone.now().date() for event in region.events.prefetch_public_translations().filter(archived=False):", "includes functions related to the event API endpoint. \"\"\" from copy import deepcopy", "\"\"\" event = event_translation.event if event.location: location_translation = ( event.location.get_public_translation(event_translation.language.slug) or event.location.best_translation )", "django.utils.html import strip_tags from ...cms.models.events.event_translation import EventTranslation from ...cms.utils.slug_utils import generate_unique_slug from ..decorators", "settings from django.http import JsonResponse from django.utils import timezone from django.utils.html import strip_tags", "all future recurrences of the event. :param event_translation: The event translation object which", "usage in loop current_language = event_translation.language current_slug = event_translation.slug # Calculate all recurrences", "event.icon.url if event.icon else None, \"location\": transform_poi(event.location, location_translation), \"event\": transform_event(event), \"hash\": None, }", "# Clear cached property in case url with different language was already calculated", "Function to create a JSON from a single event_translation object. 
:param event_translation: The", "date to recurrence date event_translation.event.start_date = recurrence_date event_translation.event.end_date = recurrence_date + event_length #", "# Clear cached property in case available languages with different recurrence was already", "( recurrence_translations ) # Update translation object with the one with prefetched temporary", "recurrence was already calculated before try: del event_translation.available_languages except AttributeError: pass yield transform_event_translation(event_translation)", "translations event_translation = recurrence_translations[current_language.slug] # Clear cached property in case available languages with", "recurrence_rule.recurrence_end_date < today ): return event_length = event.end_date - event.start_date start_date = event.start_date", "< today or recurrence_date == start_date: continue # Create all temporary translations of", "make sure original translation is not affected by changes event_translation = deepcopy(event_translation) #", "str :param language_slug: The slug of the requested language :type language_slug: str :return:", "language_slug: The slug of the requested language :type language_slug: str :return: JSON object", "\"\"\" from copy import deepcopy from datetime import timedelta from django.conf import settings", "datetime import timedelta from django.conf import settings from django.http import JsonResponse from django.utils", "event_length = event.end_date - event.start_date start_date = event.start_date event_translation.id = None # Store", "requested language :type language_slug: str :return: JSON object according to APIv3 events endpoint", "id to make sure id does not conflict with existing event translation event_translation.event.id", "\"thumbnail\": event.icon.url if event.icon else None, \"location\": transform_poi(event.location, location_translation), \"event\": transform_event(event), \"hash\": None,", "\"excerpt\": strip_tags(event_translation.content), \"content\": 
event_translation.content, \"available_languages\": event_translation.available_languages, \"thumbnail\": event.icon.url if event.icon else None, \"location\":", "settings.BASE_URL + absolute_url, \"path\": absolute_url, \"title\": event_translation.title, \"modified_gmt\": event_translation.last_updated.strftime(\"%Y-%m-%d %H:%M:%S\"), \"excerpt\": strip_tags(event_translation.content), \"content\":", "the requested region :type region_slug: str :param language_slug: The slug of the requested", "languages = event.public_languages for language in languages: # Create copy in memory to", "events of the region and transform result into JSON :param request: The current", "event_translation.slug # Calculate all recurrences of this event for recurrence_date in recurrence_rule.iter_after(start_date): if", "\"region\": event.region, \"language\": language, } ) # Reset id to make sure id", "location_translation), \"event\": transform_event(event), \"hash\": None, } def transform_event_recurrences(event_translation, today): \"\"\" Yield all future", "slug of the requested language :type language_slug: str :return: JSON object according to", "\"id\": event_translation.id, \"url\": settings.BASE_URL + absolute_url, \"path\": absolute_url, \"title\": event_translation.title, \"modified_gmt\": event_translation.last_updated.strftime(\"%Y-%m-%d %H:%M:%S\"),", ":return: An iterator over all future recurrences up to ``settings.API_EVENTS_MAX_TIME_SPAN_DAYS`` :rtype: Iterator[:class:`~datetime.date`] \"\"\"", "original translation is not affected by changes event_translation = deepcopy(event_translation) # Fake the", "django.http import JsonResponse from django.utils import timezone from django.utils.html import strip_tags from ...cms.models.events.event_translation", "if not recurrence_rule: return # In order to avoid unnecessary computations, check if", "requested region :type region_slug: str :param language_slug: The slug of the requested language", "or 
event.location.best_translation ) else: location_translation = None absolute_url = event_translation.get_absolute_url() return { \"id\":", "from django.utils.html import strip_tags from ...cms.models.events.event_translation import EventTranslation from ...cms.utils.slug_utils import generate_unique_slug from", "event_translation.title, \"modified_gmt\": event_translation.last_updated.strftime(\"%Y-%m-%d %H:%M:%S\"), \"excerpt\": strip_tags(event_translation.content), \"content\": event_translation.content, \"available_languages\": event_translation.available_languages, \"thumbnail\": event.icon.url if", "import settings from django.http import JsonResponse from django.utils import timezone from django.utils.html import", "cached property in case available languages with different recurrence was already calculated before", "single event_translation object. :param event_translation: The event translation object which should be converted", "recurrence_rule = event.recurrence_rule if not recurrence_rule: return # In order to avoid unnecessary", "Update translation object with the one with prefetched temporary translations event_translation = recurrence_translations[current_language.slug]", "future event # may be valid and return early if that is not", "= event_translation # Set the prefetched public translations to make sure the recurrence", "recurrence date event_translation.event.start_date = recurrence_date event_translation.event.end_date = recurrence_date + event_length # Clear cached", "max(start_date, today) > timedelta( days=settings.API_EVENTS_MAX_TIME_SPAN_DAYS ): break if recurrence_date < today or recurrence_date", "current_language = event_translation.language current_slug = event_translation.slug # Calculate all recurrences of this event", "start_date: continue # Create all temporary translations of this recurrence recurrence_translations = {}", "absolute_url, \"path\": absolute_url, \"title\": event_translation.title, \"modified_gmt\": 
event_translation.last_updated.strftime(\"%Y-%m-%d %H:%M:%S\"), \"excerpt\": strip_tags(event_translation.content), \"content\": event_translation.content, \"available_languages\":", "necessary for API :rtype: dict \"\"\" return { \"id\": event.id, \"start_date\": event.start_date, \"end_date\":", "+ event_length # Clear cached property in case url with different language was", "= event.public_languages for language in languages: # Create copy in memory to make", "region_slug, language_slug): \"\"\" List all events of the region and transform result into", "translations of this recurrence recurrence_translations = {} if event.region.fallback_translations_enabled: languages = event.region.active_languages else:", "this recurrence recurrence_translations = {} if event.region.fallback_translations_enabled: languages = event.region.active_languages else: languages =", "JsonResponse from django.utils import timezone from django.utils.html import strip_tags from ...cms.models.events.event_translation import EventTranslation" ]
[ "file_name = r'/Users/andrewlowe/yipitdata/Q4_2013_Groupon_North_America_Data_XLSX.xlsx' df = pd.read_excel(file_name) # Q4_13_NA dataframe Q4_13_NA = df #", "are 138534 items in the dataframe. Q4_13_NA.shape Q4_13_NA.describe() Q4_13_NA.describe(exclude='number') Q4_13_NA[] segment_local = Q4_13_NA[(Q4_13_NA['Segment']", "7) There are 138534 items in the dataframe. Q4_13_NA.shape Q4_13_NA.describe() Q4_13_NA.describe(exclude='number') Q4_13_NA[] segment_local", "Q4_13_NA.shape Q4_13_NA.describe() Q4_13_NA.describe(exclude='number') Q4_13_NA[] segment_local = Q4_13_NA[(Q4_13_NA['Segment'] == 'Local')] segment.shape segment = Q4_13_NA[(Q4_13_NA['Segment']", "dataframe. Q4_13_NA.shape Q4_13_NA.describe() Q4_13_NA.describe(exclude='number') Q4_13_NA[] segment_local = Q4_13_NA[(Q4_13_NA['Segment'] == 'Local')] segment.shape segment =", "Q4_13_NA = df # (138534, 7) There are 138534 items in the dataframe.", "# (138534, 7) There are 138534 items in the dataframe. Q4_13_NA.shape Q4_13_NA.describe() Q4_13_NA.describe(exclude='number')", "= r'/Users/andrewlowe/yipitdata/Q4_2013_Groupon_North_America_Data_XLSX.xlsx' df = pd.read_excel(file_name) # Q4_13_NA dataframe Q4_13_NA = df # (138534,", "Q4_13_NA dataframe Q4_13_NA = df # (138534, 7) There are 138534 items in", "df # (138534, 7) There are 138534 items in the dataframe. Q4_13_NA.shape Q4_13_NA.describe()", "df = pd.read_excel(file_name) # Q4_13_NA dataframe Q4_13_NA = df # (138534, 7) There", "in the dataframe. Q4_13_NA.shape Q4_13_NA.describe() Q4_13_NA.describe(exclude='number') Q4_13_NA[] segment_local = Q4_13_NA[(Q4_13_NA['Segment'] == 'Local')] segment.shape", "There are 138534 items in the dataframe. 
Q4_13_NA.shape Q4_13_NA.describe() Q4_13_NA.describe(exclude='number') Q4_13_NA[] segment_local =", "import pandas as pd file_name = r'/Users/andrewlowe/yipitdata/Q4_2013_Groupon_North_America_Data_XLSX.xlsx' df = pd.read_excel(file_name) # Q4_13_NA dataframe", "pandas as pd file_name = r'/Users/andrewlowe/yipitdata/Q4_2013_Groupon_North_America_Data_XLSX.xlsx' df = pd.read_excel(file_name) # Q4_13_NA dataframe Q4_13_NA", "= df # (138534, 7) There are 138534 items in the dataframe. Q4_13_NA.shape", "# Q4_13_NA dataframe Q4_13_NA = df # (138534, 7) There are 138534 items", "dataframe Q4_13_NA = df # (138534, 7) There are 138534 items in the", "(138534, 7) There are 138534 items in the dataframe. Q4_13_NA.shape Q4_13_NA.describe() Q4_13_NA.describe(exclude='number') Q4_13_NA[]", "= pd.read_excel(file_name) # Q4_13_NA dataframe Q4_13_NA = df # (138534, 7) There are", "Q4_13_NA.describe() Q4_13_NA.describe(exclude='number') Q4_13_NA[] segment_local = Q4_13_NA[(Q4_13_NA['Segment'] == 'Local')] segment.shape segment = Q4_13_NA[(Q4_13_NA['Segment'] ==", "items in the dataframe. Q4_13_NA.shape Q4_13_NA.describe() Q4_13_NA.describe(exclude='number') Q4_13_NA[] segment_local = Q4_13_NA[(Q4_13_NA['Segment'] == 'Local')]", "Q4_13_NA.describe(exclude='number') Q4_13_NA[] segment_local = Q4_13_NA[(Q4_13_NA['Segment'] == 'Local')] segment.shape segment = Q4_13_NA[(Q4_13_NA['Segment'] == 'Local')]", "pd.read_excel(file_name) # Q4_13_NA dataframe Q4_13_NA = df # (138534, 7) There are 138534", "as pd file_name = r'/Users/andrewlowe/yipitdata/Q4_2013_Groupon_North_America_Data_XLSX.xlsx' df = pd.read_excel(file_name) # Q4_13_NA dataframe Q4_13_NA =", "138534 items in the dataframe. Q4_13_NA.shape Q4_13_NA.describe() Q4_13_NA.describe(exclude='number') Q4_13_NA[] segment_local = Q4_13_NA[(Q4_13_NA['Segment'] ==", "the dataframe. 
Q4_13_NA.shape Q4_13_NA.describe() Q4_13_NA.describe(exclude='number') Q4_13_NA[] segment_local = Q4_13_NA[(Q4_13_NA['Segment'] == 'Local')] segment.shape segment", "pd file_name = r'/Users/andrewlowe/yipitdata/Q4_2013_Groupon_North_America_Data_XLSX.xlsx' df = pd.read_excel(file_name) # Q4_13_NA dataframe Q4_13_NA = df", "r'/Users/andrewlowe/yipitdata/Q4_2013_Groupon_North_America_Data_XLSX.xlsx' df = pd.read_excel(file_name) # Q4_13_NA dataframe Q4_13_NA = df # (138534, 7)" ]
[ "\"reverse####.jpg\", \"randwalk####.pdf\", and \"randwalk####.txt\", where \"####\" ranges through the powers of 2 from", "the cumulative differences between the subpopulation and the full population, controlling for the", "FileExistsError: pass dir += '/' # Consider both the original ordering of covariates", "a full population of 1,000 individual members and a subpopulation of 100 subselected", "hc = HilbertCurve(precision, p) ints = hc.distances_from_points(x) assert np.unique(ints).size == x.shape[0] # Sort", "f.write(f'{kolmogorov_smirnov:.4}\\n') f.write('Kuiper / lenscale:\\n') f.write(f'{(kuiper / lenscale):.4}\\n') f.write('Kolmogorov-Smirnov / lenscale:\\n') f.write(f'{(kolmogorov_smirnov / lenscale):.4}\\n')", "order from those named \"synth####.pdf\" and \"synth####.txt\". The files named \"randwalk####.pdf\" and \"randwalk####.txt\"", "w = w[::-1] # Generate responses based on the random direction and membership", "== 32: dtype = np.uint32 elif precision == 64: dtype = np.uint64 else:", "1)]: print(f'p = {p}') # Set up the random number generator. rng =", "to v (the Heaviside function is also known as the unit step function,", "# Generate responses based on the random direction and membership # in the", "- imin) # Ensure uniqueness even after roundoff errors. eps = np.finfo(np.float64).eps s", "name = 'randwalk' filename = dir + name + str(p).zfill(max_digits) + '.pdf' #", "Generate a random permutation for the indices of the subpopulation. inds = rng.permutation((m))[:n]", "s = s + np.arange(0, s.size * eps, eps) s = s.astype(np.float64) #", "f: f.write('m:\\n') f.write(f'{len(s)}\\n') f.write('n:\\n') f.write(f'{len(inds)}\\n') f.write('lenscale:\\n') f.write(f'{lenscale}\\n') f.write('Kuiper:\\n') f.write(f'{kuiper:.4}\\n') f.write('Kolmogorov-Smirnov:\\n') f.write(f'{kolmogorov_smirnov:.4}\\n') f.write('Kuiper /", "Create a directory as needed. 
dir = 'unweighted' try: os.mkdir(dir) except FileExistsError: pass", "via the following procedure, which consists of only a single stage for the", "the full population, controlling for the specified number of covariates. The corresponding txt", "# Set the number (p) of covariates. for p in [2**k for k", "population. That concludes the first stage of the procedure. For the files whose", "of characters for its length. max_digits = math.ceil(pmax * math.log(2) / math.log(10)) if", "the number (p) of covariates. for p in [2**k for k in range(1,", "from the uniform distribution over the interval (0, 1), where p is the", "covariates. for p in [2**k for k in range(1, pmax + 1)]: print(f'p", "and the full population, controlling for the specified number of covariates. The corresponding", "found in the LICENSE file in the root directory of this source tree.", "file in the root directory of this source tree. \"\"\" import math import", "the random direction and membership # in the subpopulation. centered = x.astype(np.float64) -", "the same number of characters for its length. max_digits = math.ceil(pmax * math.log(2)", "== 16: dtype = np.uint16 elif precision == 32: dtype = np.uint32 elif", "/ (imax - imin) # Ensure uniqueness even after roundoff errors. eps =", "{diff}') pmax = 12 # Set the number (p) of covariates. for p", "# Construct the graph of cumulative differences. majorticks = 10 minorticks = 100", "cumulative differences. majorticks = 10 minorticks = 100 kuiper, kolmogorov_smirnov, lenscale = cumulative(", "perm = np.argsort(ints) x = x[perm, :] invperm = np.arange(len(perm)) invperm[perm] = np.arange(len(perm))", "is also known as the unit step function, and takes the value 0", "after roundoff errors. 
eps = np.finfo(np.float64).eps s = s + np.arange(0, s.size *", "member of the full population consists of p independent and identically distributed draws", "distributed draws from the standard normal distribution, and finally then apply the Heaviside", "(np.sign(centered @ w) + 1) / 2 if diff: r[inds] = 1 #", "subpopulation. n = 100 # Set the number of bits in the discretization", "= (np.sign(centered @ w) + 1) / 2 if diff: r[inds] = 1", "begin \"synth...\" or begin \"reverse...\", we set the responses for all members of", "\"reverse...\"): we collect together the covariates for all the members into a 1000", "of 100 subselected uniformly at random from the full population. Each member of", "plots the cumulative differences between the subpopulation and the full population, controlling for", "# for the subpopulation. for (reverse, diff) in [(False, True), (True, True), (False,", "Construct the graph of cumulative differences. majorticks = 10 minorticks = 100 kuiper,", "a single stage for the files whose names begin \"randwalk...\", but consists of", "= w[::-1] # Generate responses based on the random direction and membership #", "(mantissa). precision = 64 # Determine the data type from precision. if precision", "precision. if precision == 8: dtype = np.uint8 elif precision == 16: dtype", "'synth' else: name = 'randwalk' filename = dir + name + str(p).zfill(max_digits) +", "the working directory if the directory does not already exist, then creates many", "dimensions to one dimension. hc = HilbertCurve(precision, p) ints = hc.distances_from_points(x) assert np.unique(ints).size", "of covariates. The corresponding txt files report metrics about the plots. The files", "This script creates a directory, \"unweighted\", in the working directory if the directory", "the full population. Each member of the full population consists of p independent", "together the covariates for all the members into a 1000 x p matrix", "concludes the first stage of the procedure. 
For the files whose names begin", "consists of a full population of 1,000 individual members and a subpopulation of", "1000 x p matrix x, construct the p x 1 vector v whose", "math import numpy as np from numpy.random import default_rng import os from hilbertcurve.hilbertcurve", "= np.uint64 else: raise TypeError(f'There is no support for precision = {precision}.') #", "for a range of synthetic toy examples. Copyright (c) Meta Platforms, Inc. and", "os.mkdir(dir) except FileExistsError: pass dir += '/' # Consider both the original ordering", "HilbertCurve(precision, p) ints = hc.distances_from_points(x) assert np.unique(ints).size == x.shape[0] # Sort according to", "so that every filename # has the same number of characters for its", "the plots. The files named \"reverse####.pdf\" and \"reverse####.txt\" condition on the covariates in", "try: os.mkdir(dir) except FileExistsError: pass dir += '/' # Consider both the original", "length. max_digits = math.ceil(pmax * math.log(2) / math.log(10)) if reverse and diff: name", "two separate stages for the files whose names begin \"synth...\" or \"reverse...\"): we", "consists of p independent and identically distributed draws from the uniform distribution over", "# in the subpopulation. centered = x.astype(np.float64) - 2**(precision - 1) r =", "in a text file. filename = filename[:-4] + '.txt' with open(filename, 'w') as", "inds = np.sort(inds) # Construct scores for plotting. imin = np.min(ints) imax =", "python3 \"\"\" Plot the subpopulation deviations for a range of synthetic toy examples.", "many files there. The filenames are \"synth####.pdf\", \"synth####.txt\", \"reverse####.pdf\", \"reverse####.jpg\", \"randwalk####.pdf\", and \"randwalk####.txt\",", "np.uint64 else: raise TypeError(f'There is no support for precision = {precision}.') # Create", "discretization (mantissa). precision = 64 # Determine the data type from precision. if", "s.astype(np.float64) # Form a random direction. 
w = rng.standard_normal(size=(p)) w /= np.linalg.norm(w, ord=2)", "32: dtype = np.uint32 elif precision == 64: dtype = np.uint64 else: raise", "both the original ordering of covariates and the reverse ordering, # as well", "negative arguments and the value 1 for positive arguments). The result is a", "for (reverse, diff) in [(False, True), (True, True), (False, False)]: print(f'reverse = {reverse}')", "examples. Copyright (c) Meta Platforms, Inc. and affiliates. This script creates a directory,", "subpopulation and the full population, controlling for the specified number of covariates. The", "Platforms, Inc. and affiliates. This script creates a directory, \"unweighted\", in the working", "needed. dir = 'unweighted' try: os.mkdir(dir) except FileExistsError: pass dir += '/' #", "- 1) r = (np.sign(centered @ w) + 1) / 2 if diff:", "\"synth####.pdf\" and \"synth####.txt\". The files named \"randwalk####.pdf\" and \"randwalk####.txt\" use the same distribution", "files whose names begin \"synth...\" or \"reverse...\"): we collect together the covariates for", "x.shape[0] # Sort according to the scores. perm = np.argsort(ints) x = x[perm,", "(reverse, diff) in [(False, True), (True, True), (False, False)]: print(f'reverse = {reverse}') print(f'diff", "size=(m, p), dtype=dtype) if reverse: x = x[:, ::-1] # Perform the Hilbert", "scores for plotting. imin = np.min(ints) imax = np.max(ints) s = (np.sort(ints) -", "Set up the random number generator. rng = default_rng(seed=543216789) # Generate a random", "math.ceil(pmax * math.log(2) / math.log(10)) if reverse and diff: name = 'reverse' elif", "a random permutation for the indices of the subpopulation. inds = rng.permutation((m))[:n] #", "controlling for the specified number of covariates. The corresponding txt files report metrics", "\"unweighted\", in the working directory if the directory does not already exist, then", "condition on all the covariates. 
We generate the responses via the following procedure,", "f.write(f'{lenscale}\\n') f.write('Kuiper:\\n') f.write(f'{kuiper:.4}\\n') f.write('Kolmogorov-Smirnov:\\n') f.write(f'{kolmogorov_smirnov:.4}\\n') f.write('Kuiper / lenscale:\\n') f.write(f'{(kuiper / lenscale):.4}\\n') f.write('Kolmogorov-Smirnov /", "for the files whose names begin \"synth...\" or \"reverse...\"): we collect together the", "# Pad with zeros the number in the filename so that every filename", "the same distribution of responses for the subpopulation as for the full population.", "tree. \"\"\" import math import numpy as np from numpy.random import default_rng import", "stages for the files whose names begin \"synth...\" or \"reverse...\"): we collect together", "the procedure. This source code is licensed under the MIT license found in", "= np.max(ints) s = (np.sort(ints) - imin) / (imax - imin) # Ensure", "dtype = np.uint16 elif precision == 32: dtype = np.uint32 elif precision ==", "name = 'synth' else: name = 'randwalk' filename = dir + name +", "x 1 vector v whose entries are independent and identically distributed draws from", "stage for the files whose names begin \"randwalk...\", but consists of two separate", "dimension. hc = HilbertCurve(precision, p) ints = hc.distances_from_points(x) assert np.unique(ints).size == x.shape[0] #", "invperm[perm] = np.arange(len(perm)) inds = invperm[inds] inds = np.sort(inds) # Construct scores for", "of the full population consists of p independent and identically distributed draws from", "(False, False)]: print(f'reverse = {reverse}') print(f'diff = {diff}') pmax = 12 # Set", ":] invperm = np.arange(len(perm)) invperm[perm] = np.arange(len(perm)) inds = invperm[inds] inds = np.sort(inds)", "of 1,000 individual members and a subpopulation of 100 subselected uniformly at random", "dtype = np.uint8 elif precision == 16: dtype = np.uint16 elif precision ==", "directory of this source tree. 
\"\"\" import math import numpy as np from", "the p x 1 vector v whose entries are independent and identically distributed", "f.write('n:\\n') f.write(f'{len(inds)}\\n') f.write('lenscale:\\n') f.write(f'{lenscale}\\n') f.write('Kuiper:\\n') f.write(f'{kuiper:.4}\\n') f.write('Kolmogorov-Smirnov:\\n') f.write(f'{kolmogorov_smirnov:.4}\\n') f.write('Kuiper / lenscale:\\n') f.write(f'{(kuiper /", "# Save metrics in a text file. filename = filename[:-4] + '.txt' with", "True), (False, False)]: print(f'reverse = {reverse}') print(f'diff = {diff}') pmax = 12 #", "diff: r[inds] = 1 # Pad with zeros the number in the filename", "for the corresponding members of the full population. That concludes the first stage", "TypeError(f'There is no support for precision = {precision}.') # Create a directory as", "for p in [2**k for k in range(1, pmax + 1)]: print(f'p =", "f.write('Kolmogorov-Smirnov:\\n') f.write(f'{kolmogorov_smirnov:.4}\\n') f.write('Kuiper / lenscale:\\n') f.write(f'{(kuiper / lenscale):.4}\\n') f.write('Kolmogorov-Smirnov / lenscale:\\n') f.write(f'{(kolmogorov_smirnov /", "dir += '/' # Consider both the original ordering of covariates and the", "as the second stage of the procedure. This source code is licensed under", "x-0.5) applied to v (the Heaviside function is also known as the unit", "for all the members into a 1000 x p matrix x, construct the", "in the responses # for the subpopulation. for (reverse, diff) in [(False, True),", "\"synth...\" or begin \"reverse...\", we set the responses for all members of the", "lenscale = cumulative( r, s, inds, majorticks, minorticks, filename=filename) # Save metrics in", "minorticks = 100 kuiper, kolmogorov_smirnov, lenscale = cumulative( r, s, inds, majorticks, minorticks,", "Set the size of the subpopulation. n = 100 # Set the number", "dtype = np.uint32 elif precision == 64: dtype = np.uint64 else: raise TypeError(f'There", "* eps, eps) s = s.astype(np.float64) # Form a random direction. 
w =", "x = x[:, ::-1] # Perform the Hilbert mapping from p dimensions to", "subpopulation. inds = rng.permutation((m))[:n] # Generate data at random. x = rng.integers(2**precision -", "names begin \"synth...\" or begin \"reverse...\", we set the responses for all members", "0 for negative arguments and the value 1 for positive arguments). The result", "imin = np.min(ints) imax = np.max(ints) s = (np.sort(ints) - imin) / (imax", "* math.log(2) / math.log(10)) if reverse and diff: name = 'reverse' elif diff:", "there. The filenames are \"synth####.pdf\", \"synth####.txt\", \"reverse####.pdf\", \"reverse####.jpg\", \"randwalk####.pdf\", and \"randwalk####.txt\", where \"####\"", "distribution of responses for the subpopulation as for the full population. The data", "of the subpopulation to 1, as the second stage of the procedure. This", "r, s, inds, majorticks, minorticks, filename=filename) # Save metrics in a text file.", "file. filename = filename[:-4] + '.txt' with open(filename, 'w') as f: f.write('m:\\n') f.write(f'{len(s)}\\n')", "'randwalk' filename = dir + name + str(p).zfill(max_digits) + '.pdf' # Construct the", "names begin \"synth...\" or \"reverse...\"): we collect together the covariates for all the", "significant deviation in the responses # for the subpopulation. for (reverse, diff) in", "# Set the size of the subpopulation. n = 100 # Set the", "from p dimensions to one dimension. hc = HilbertCurve(precision, p) ints = hc.distances_from_points(x)", "2 if diff: r[inds] = 1 # Pad with zeros the number in", "# has the same number of characters for its length. max_digits = math.ceil(pmax", "+ str(p).zfill(max_digits) + '.pdf' # Construct the graph of cumulative differences. majorticks =", "inds, majorticks, minorticks, filename=filename) # Save metrics in a text file. filename =", "0002 to 4096. 
Each pdf file plots the cumulative differences between the subpopulation", "population of 1,000 individual members and a subpopulation of 100 subselected uniformly at", "a 1000 x 1 vector of 0s and 1s whose entries are the", "responses # for the subpopulation. for (reverse, diff) in [(False, True), (True, True),", "pdf file plots the cumulative differences between the subpopulation and the full population,", "full population. That concludes the first stage of the procedure. For the files", "pmax = 12 # Set the number (p) of covariates. for p in", "use the same distribution of responses for the subpopulation as for the full", "os from hilbertcurve.hilbertcurve import HilbertCurve from subpop import cumulative # Set the number", "The files named \"reverse####.pdf\" and \"reverse####.txt\" condition on the covariates in the reverse", "\"synth####.pdf\", \"synth####.txt\", \"reverse####.pdf\", \"reverse####.jpg\", \"randwalk####.pdf\", and \"randwalk####.txt\", where \"####\" ranges through the powers", "random number generator. rng = default_rng(seed=543216789) # Generate a random permutation for the", "= 1000 # Set the size of the subpopulation. n = 100 #", "str(p).zfill(max_digits) + '.pdf' # Construct the graph of cumulative differences. majorticks = 10", "draws from the uniform distribution over the interval (0, 1), where p is", "metrics about the plots. The files named \"reverse####.pdf\" and \"reverse####.txt\" condition on the", "the subpopulation as for the full population. The data consists of a full", "= np.uint32 elif precision == 64: dtype = np.uint64 else: raise TypeError(f'There is", "at random from the full population. Each member of the full population consists", "is the number of covariates. We condition on all the covariates. We generate", "# Set the number of bits in the discretization (mantissa). 
precision = 64", "majorticks = 10 minorticks = 100 kuiper, kolmogorov_smirnov, lenscale = cumulative( r, s,", "distribution over the interval (0, 1), where p is the number of covariates.", "distributed draws from the uniform distribution over the interval (0, 1), where p", "and the value 1 for positive arguments). The result is a 1000 x", "plots. The files named \"reverse####.pdf\" and \"reverse####.txt\" condition on the covariates in the", "1) r = (np.sign(centered @ w) + 1) / 2 if diff: r[inds]", "report metrics about the plots. The files named \"reverse####.pdf\" and \"reverse####.txt\" condition on", "of the procedure. For the files whose names begin \"synth...\" or begin \"reverse...\",", "# Consider both the original ordering of covariates and the reverse ordering, #", "np from numpy.random import default_rng import os from hilbertcurve.hilbertcurve import HilbertCurve from subpop", "@ w) + 1) / 2 if diff: r[inds] = 1 # Pad", "filenames are \"synth####.pdf\", \"synth####.txt\", \"reverse####.pdf\", \"reverse####.jpg\", \"randwalk####.pdf\", and \"randwalk####.txt\", where \"####\" ranges through", "responses for the corresponding members of the full population. That concludes the first", "if reverse and diff: name = 'reverse' elif diff: name = 'synth' else:", "+ 1) / 2 if diff: r[inds] = 1 # Pad with zeros", "s = s.astype(np.float64) # Form a random direction. w = rng.standard_normal(size=(p)) w /=", "of cumulative differences. majorticks = 10 minorticks = 100 kuiper, kolmogorov_smirnov, lenscale =", "# Set the number of examples. 
m = 1000 # Set the size", "draws from the standard normal distribution, and finally then apply the Heaviside function", "+ name + str(p).zfill(max_digits) + '.pdf' # Construct the graph of cumulative differences.", "\"reverse####.txt\" condition on the covariates in the reverse order from those named \"synth####.pdf\"", "ordering, # as well as a complete lack of significant deviation in the", "'.txt' with open(filename, 'w') as f: f.write('m:\\n') f.write(f'{len(s)}\\n') f.write('n:\\n') f.write(f'{len(inds)}\\n') f.write('lenscale:\\n') f.write(f'{lenscale}\\n') f.write('Kuiper:\\n')", "one dimension. hc = HilbertCurve(precision, p) ints = hc.distances_from_points(x) assert np.unique(ints).size == x.shape[0]", "reverse ordering, # as well as a complete lack of significant deviation in", "ints = hc.distances_from_points(x) assert np.unique(ints).size == x.shape[0] # Sort according to the scores.", "1000 x 1 vector of 0s and 1s whose entries are the responses", "full population. The data consists of a full population of 1,000 individual members", "and affiliates. This script creates a directory, \"unweighted\", in the working directory if", "files report metrics about the plots. The files named \"reverse####.pdf\" and \"reverse####.txt\" condition", "\"\"\" Plot the subpopulation deviations for a range of synthetic toy examples. Copyright", "corresponding txt files report metrics about the plots. The files named \"reverse####.pdf\" and", "the files whose names begin \"synth...\" or begin \"reverse...\", we set the responses", "= np.finfo(np.float64).eps s = s + np.arange(0, s.size * eps, eps) s =", "name = 'reverse' elif diff: name = 'synth' else: name = 'randwalk' filename", "the Heaviside function to every entry of \"centered\" (= x-0.5) applied to v", "{p}') # Set up the random number generator. 
rng = default_rng(seed=543216789) # Generate", "np.uint32 elif precision == 64: dtype = np.uint64 else: raise TypeError(f'There is no", "the subpopulation deviations for a range of synthetic toy examples. Copyright (c) Meta", "as well as a complete lack of significant deviation in the responses #", "whose entries are the responses for the corresponding members of the full population.", "kolmogorov_smirnov, lenscale = cumulative( r, s, inds, majorticks, minorticks, filename=filename) # Save metrics", "else: name = 'randwalk' filename = dir + name + str(p).zfill(max_digits) + '.pdf'", "the full population consists of p independent and identically distributed draws from the", "of examples. m = 1000 # Set the size of the subpopulation. n", "of covariates and the reverse ordering, # as well as a complete lack", "else: raise TypeError(f'There is no support for precision = {precision}.') # Create a", "x[:, ::-1] # Perform the Hilbert mapping from p dimensions to one dimension.", "/= np.linalg.norm(w, ord=2) if reverse: w = w[::-1] # Generate responses based on", "# Create a directory as needed. dir = 'unweighted' try: os.mkdir(dir) except FileExistsError:", "rng.integers(2**precision - 1, size=(m, p), dtype=dtype) if reverse: x = x[:, ::-1] #", "+ '.pdf' # Construct the graph of cumulative differences. majorticks = 10 minorticks", "= 12 # Set the number (p) of covariates. for p in [2**k", "random. x = rng.integers(2**precision - 1, size=(m, p), dtype=dtype) if reverse: x =", "== 64: dtype = np.uint64 else: raise TypeError(f'There is no support for precision", "of 2 from 0002 to 4096. Each pdf file plots the cumulative differences", "the full population. The data consists of a full population of 1,000 individual", "and membership # in the subpopulation. 
centered = x.astype(np.float64) - 2**(precision - 1)", "then apply the Heaviside function to every entry of \"centered\" (= x-0.5) applied", "s + np.arange(0, s.size * eps, eps) s = s.astype(np.float64) # Form a", "filename=filename) # Save metrics in a text file. filename = filename[:-4] + '.txt'", "{precision}.') # Create a directory as needed. dir = 'unweighted' try: os.mkdir(dir) except", "the root directory of this source tree. \"\"\" import math import numpy as", "then creates many files there. The filenames are \"synth####.pdf\", \"synth####.txt\", \"reverse####.pdf\", \"reverse####.jpg\", \"randwalk####.pdf\",", "# Construct scores for plotting. imin = np.min(ints) imax = np.max(ints) s =", "the specified number of covariates. The corresponding txt files report metrics about the", "for the files whose names begin \"randwalk...\", but consists of two separate stages", "where \"####\" ranges through the powers of 2 from 0002 to 4096. Each", "= HilbertCurve(precision, p) ints = hc.distances_from_points(x) assert np.unique(ints).size == x.shape[0] # Sort according", "from subpop import cumulative # Set the number of examples. m = 1000", "even after roundoff errors. eps = np.finfo(np.float64).eps s = s + np.arange(0, s.size", "of significant deviation in the responses # for the subpopulation. for (reverse, diff)", "if reverse: x = x[:, ::-1] # Perform the Hilbert mapping from p", "<reponame>facebookresearch/metamulti #!/usr/bin/env python3 \"\"\" Plot the subpopulation deviations for a range of synthetic", "\"reverse####.pdf\", \"reverse####.jpg\", \"randwalk####.pdf\", and \"randwalk####.txt\", where \"####\" ranges through the powers of 2", "txt files report metrics about the plots. The files named \"reverse####.pdf\" and \"reverse####.txt\"", "the covariates. We generate the responses via the following procedure, which consists of", "if precision == 8: dtype = np.uint8 elif precision == 16: dtype =", "direction. 
w = rng.standard_normal(size=(p)) w /= np.linalg.norm(w, ord=2) if reverse: w = w[::-1]", "We generate the responses via the following procedure, which consists of only a", "w) + 1) / 2 if diff: r[inds] = 1 # Pad with", "default_rng import os from hilbertcurve.hilbertcurve import HilbertCurve from subpop import cumulative # Set", "covariates. We generate the responses via the following procedure, which consists of only", "to every entry of \"centered\" (= x-0.5) applied to v (the Heaviside function", "already exist, then creates many files there. The filenames are \"synth####.pdf\", \"synth####.txt\", \"reverse####.pdf\",", "only a single stage for the files whose names begin \"randwalk...\", but consists", "members of the full population. That concludes the first stage of the procedure.", "= {diff}') pmax = 12 # Set the number (p) of covariates. for", "from the full population. Each member of the full population consists of p", "population. Each member of the full population consists of p independent and identically", "full population, controlling for the specified number of covariates. The corresponding txt files", "differences between the subpopulation and the full population, controlling for the specified number", "function, and takes the value 0 for negative arguments and the value 1", "for the full population. The data consists of a full population of 1,000", "number of covariates. We condition on all the covariates. We generate the responses", "are independent and identically distributed draws from the standard normal distribution, and finally", "number (p) of covariates. 
for p in [2**k for k in range(1, pmax", "= 'synth' else: name = 'randwalk' filename = dir + name + str(p).zfill(max_digits)", "all the members into a 1000 x p matrix x, construct the p", "the following procedure, which consists of only a single stage for the files", "begin \"reverse...\", we set the responses for all members of the subpopulation to", "a complete lack of significant deviation in the responses # for the subpopulation.", "mapping from p dimensions to one dimension. hc = HilbertCurve(precision, p) ints =", "covariates for all the members into a 1000 x p matrix x, construct", "license found in the LICENSE file in the root directory of this source", "directory as needed. dir = 'unweighted' try: os.mkdir(dir) except FileExistsError: pass dir +=", "= invperm[inds] inds = np.sort(inds) # Construct scores for plotting. imin = np.min(ints)", "number of characters for its length. max_digits = math.ceil(pmax * math.log(2) / math.log(10))", "lack of significant deviation in the responses # for the subpopulation. for (reverse,", "np.linalg.norm(w, ord=2) if reverse: w = w[::-1] # Generate responses based on the", "numpy.random import default_rng import os from hilbertcurve.hilbertcurve import HilbertCurve from subpop import cumulative", "of responses for the subpopulation as for the full population. The data consists", "arguments). The result is a 1000 x 1 vector of 0s and 1s", "subpopulation. centered = x.astype(np.float64) - 2**(precision - 1) r = (np.sign(centered @ w)", "= hc.distances_from_points(x) assert np.unique(ints).size == x.shape[0] # Sort according to the scores. perm", "= s.astype(np.float64) # Form a random direction. w = rng.standard_normal(size=(p)) w /= np.linalg.norm(w,", "that every filename # has the same number of characters for its length.", "{reverse}') print(f'diff = {diff}') pmax = 12 # Set the number (p) of", "characters for its length. 
max_digits = math.ceil(pmax * math.log(2) / math.log(10)) if reverse", "those named \"synth####.pdf\" and \"synth####.txt\". The files named \"randwalk####.pdf\" and \"randwalk####.txt\" use the", "stage of the procedure. For the files whose names begin \"synth...\" or begin", "root directory of this source tree. \"\"\" import math import numpy as np", "the size of the subpopulation. n = 100 # Set the number of", "type from precision. if precision == 8: dtype = np.uint8 elif precision ==", "random direction and membership # in the subpopulation. centered = x.astype(np.float64) - 2**(precision", "population, controlling for the specified number of covariates. The corresponding txt files report", "of only a single stage for the files whose names begin \"randwalk...\", but", "full population of 1,000 individual members and a subpopulation of 100 subselected uniformly", "is a 1000 x 1 vector of 0s and 1s whose entries are", "np.argsort(ints) x = x[perm, :] invperm = np.arange(len(perm)) invperm[perm] = np.arange(len(perm)) inds =", "diff: name = 'synth' else: name = 'randwalk' filename = dir + name", "this source tree. \"\"\" import math import numpy as np from numpy.random import", "= 100 # Set the number of bits in the discretization (mantissa). precision", "raise TypeError(f'There is no support for precision = {precision}.') # Create a directory", "'w') as f: f.write('m:\\n') f.write(f'{len(s)}\\n') f.write('n:\\n') f.write(f'{len(inds)}\\n') f.write('lenscale:\\n') f.write(f'{lenscale}\\n') f.write('Kuiper:\\n') f.write(f'{kuiper:.4}\\n') f.write('Kolmogorov-Smirnov:\\n') f.write(f'{kolmogorov_smirnov:.4}\\n')", "x = rng.integers(2**precision - 1, size=(m, p), dtype=dtype) if reverse: x = x[:,", "invperm[inds] inds = np.sort(inds) # Construct scores for plotting. 
imin = np.min(ints) imax", "dtype = np.uint64 else: raise TypeError(f'There is no support for precision = {precision}.')", "construct the p x 1 vector v whose entries are independent and identically", "original ordering of covariates and the reverse ordering, # as well as a", "\"synth####.txt\". The files named \"randwalk####.pdf\" and \"randwalk####.txt\" use the same distribution of responses", "100 subselected uniformly at random from the full population. Each member of the", "second stage of the procedure. This source code is licensed under the MIT", "in the subpopulation. centered = x.astype(np.float64) - 2**(precision - 1) r = (np.sign(centered", "MIT license found in the LICENSE file in the root directory of this", "from numpy.random import default_rng import os from hilbertcurve.hilbertcurve import HilbertCurve from subpop import", "named \"synth####.pdf\" and \"synth####.txt\". The files named \"randwalk####.pdf\" and \"randwalk####.txt\" use the same", "= np.min(ints) imax = np.max(ints) s = (np.sort(ints) - imin) / (imax -", "r = (np.sign(centered @ w) + 1) / 2 if diff: r[inds] =", "= 'reverse' elif diff: name = 'synth' else: name = 'randwalk' filename =", "on the random direction and membership # in the subpopulation. centered = x.astype(np.float64)", "the responses via the following procedure, which consists of only a single stage", "1 vector v whose entries are independent and identically distributed draws from the", "to 1, as the second stage of the procedure. This source code is", "value 0 for negative arguments and the value 1 for positive arguments). The", "2 from 0002 to 4096. 
Each pdf file plots the cumulative differences between", "k in range(1, pmax + 1)]: print(f'p = {p}') # Set up the", "files whose names begin \"synth...\" or begin \"reverse...\", we set the responses for", "math.log(10)) if reverse and diff: name = 'reverse' elif diff: name = 'synth'", "independent and identically distributed draws from the uniform distribution over the interval (0,", "members into a 1000 x p matrix x, construct the p x 1", "subpopulation deviations for a range of synthetic toy examples. Copyright (c) Meta Platforms,", "a directory as needed. dir = 'unweighted' try: os.mkdir(dir) except FileExistsError: pass dir", "subpopulation to 1, as the second stage of the procedure. This source code", "= rng.integers(2**precision - 1, size=(m, p), dtype=dtype) if reverse: x = x[:, ::-1]", "dir + name + str(p).zfill(max_digits) + '.pdf' # Construct the graph of cumulative", "between the subpopulation and the full population, controlling for the specified number of", "Plot the subpopulation deviations for a range of synthetic toy examples. Copyright (c)", "precision = 64 # Determine the data type from precision. if precision ==", "= 100 kuiper, kolmogorov_smirnov, lenscale = cumulative( r, s, inds, majorticks, minorticks, filename=filename)", "Save metrics in a text file. filename = filename[:-4] + '.txt' with open(filename,", "consists of only a single stage for the files whose names begin \"randwalk...\",", "the number of bits in the discretization (mantissa). precision = 64 # Determine", "# Perform the Hilbert mapping from p dimensions to one dimension. hc =", "single stage for the files whose names begin \"randwalk...\", but consists of two", "12 # Set the number (p) of covariates. for p in [2**k for", "p x 1 vector v whose entries are independent and identically distributed draws", "based on the random direction and membership # in the subpopulation. centered =", "in the root directory of this source tree. 
\"\"\" import math import numpy", "the responses # for the subpopulation. for (reverse, diff) in [(False, True), (True,", "f.write(f'{kuiper:.4}\\n') f.write('Kolmogorov-Smirnov:\\n') f.write(f'{kolmogorov_smirnov:.4}\\n') f.write('Kuiper / lenscale:\\n') f.write(f'{(kuiper / lenscale):.4}\\n') f.write('Kolmogorov-Smirnov / lenscale:\\n') f.write(f'{(kolmogorov_smirnov", "f.write(f'{len(s)}\\n') f.write('n:\\n') f.write(f'{len(inds)}\\n') f.write('lenscale:\\n') f.write(f'{lenscale}\\n') f.write('Kuiper:\\n') f.write(f'{kuiper:.4}\\n') f.write('Kolmogorov-Smirnov:\\n') f.write(f'{kolmogorov_smirnov:.4}\\n') f.write('Kuiper / lenscale:\\n') f.write(f'{(kuiper", "inds = invperm[inds] inds = np.sort(inds) # Construct scores for plotting. imin =", "responses based on the random direction and membership # in the subpopulation. centered", "in the discretization (mantissa). precision = 64 # Determine the data type from", "creates a directory, \"unweighted\", in the working directory if the directory does not", "Each member of the full population consists of p independent and identically distributed", "x p matrix x, construct the p x 1 vector v whose entries", "vector v whose entries are independent and identically distributed draws from the standard", "begin \"synth...\" or \"reverse...\"): we collect together the covariates for all the members", "= x.astype(np.float64) - 2**(precision - 1) r = (np.sign(centered @ w) + 1)", "== x.shape[0] # Sort according to the scores. perm = np.argsort(ints) x =", "/ math.log(10)) if reverse and diff: name = 'reverse' elif diff: name =", "number generator. rng = default_rng(seed=543216789) # Generate a random permutation for the indices", "name + str(p).zfill(max_digits) + '.pdf' # Construct the graph of cumulative differences. 
majorticks", "+= '/' # Consider both the original ordering of covariates and the reverse", "'/' # Consider both the original ordering of covariates and the reverse ordering,", "The data consists of a full population of 1,000 individual members and a", "import HilbertCurve from subpop import cumulative # Set the number of examples. m", "Set the number of examples. m = 1000 # Set the size of", "known as the unit step function, and takes the value 0 for negative", "(= x-0.5) applied to v (the Heaviside function is also known as the", "as needed. dir = 'unweighted' try: os.mkdir(dir) except FileExistsError: pass dir += '/'", "import os from hilbertcurve.hilbertcurve import HilbertCurve from subpop import cumulative # Set the", "of a full population of 1,000 individual members and a subpopulation of 100", "Each pdf file plots the cumulative differences between the subpopulation and the full", "a subpopulation of 100 subselected uniformly at random from the full population. Each", "# Determine the data type from precision. if precision == 8: dtype =", "generate the responses via the following procedure, which consists of only a single", "pmax + 1)]: print(f'p = {p}') # Set up the random number generator.", "has the same number of characters for its length. max_digits = math.ceil(pmax *", "1 for positive arguments). The result is a 1000 x 1 vector of", "of the subpopulation. inds = rng.permutation((m))[:n] # Generate data at random. x =", "eps) s = s.astype(np.float64) # Form a random direction. 
w = rng.standard_normal(size=(p)) w", "16: dtype = np.uint16 elif precision == 32: dtype = np.uint32 elif precision", "2**(precision - 1) r = (np.sign(centered @ w) + 1) / 2 if", "= 10 minorticks = 100 kuiper, kolmogorov_smirnov, lenscale = cumulative( r, s, inds,", "dir = 'unweighted' try: os.mkdir(dir) except FileExistsError: pass dir += '/' # Consider", "but consists of two separate stages for the files whose names begin \"synth...\"", "(True, True), (False, False)]: print(f'reverse = {reverse}') print(f'diff = {diff}') pmax = 12", "well as a complete lack of significant deviation in the responses # for", "+ '.txt' with open(filename, 'w') as f: f.write('m:\\n') f.write(f'{len(s)}\\n') f.write('n:\\n') f.write(f'{len(inds)}\\n') f.write('lenscale:\\n') f.write(f'{lenscale}\\n')", "This source code is licensed under the MIT license found in the LICENSE", "Copyright (c) Meta Platforms, Inc. and affiliates. This script creates a directory, \"unweighted\",", "a text file. filename = filename[:-4] + '.txt' with open(filename, 'w') as f:", "filename[:-4] + '.txt' with open(filename, 'w') as f: f.write('m:\\n') f.write(f'{len(s)}\\n') f.write('n:\\n') f.write(f'{len(inds)}\\n') f.write('lenscale:\\n')", "the interval (0, 1), where p is the number of covariates. We condition", "the LICENSE file in the root directory of this source tree. \"\"\" import", "the data type from precision. if precision == 8: dtype = np.uint8 elif", "v whose entries are independent and identically distributed draws from the standard normal", "or begin \"reverse...\", we set the responses for all members of the subpopulation", "1s whose entries are the responses for the corresponding members of the full", "\"reverse...\", we set the responses for all members of the subpopulation to 1,", "= x[perm, :] invperm = np.arange(len(perm)) invperm[perm] = np.arange(len(perm)) inds = invperm[inds] inds", "arguments and the value 1 for positive arguments). 
The result is a 1000", "for the subpopulation as for the full population. The data consists of a", "source tree. \"\"\" import math import numpy as np from numpy.random import default_rng", "the subpopulation to 1, as the second stage of the procedure. This source", "the indices of the subpopulation. inds = rng.permutation((m))[:n] # Generate data at random.", "# as well as a complete lack of significant deviation in the responses", "Set the number of bits in the discretization (mantissa). precision = 64 #", "p in [2**k for k in range(1, pmax + 1)]: print(f'p = {p}')", "support for precision = {precision}.') # Create a directory as needed. dir =", "from hilbertcurve.hilbertcurve import HilbertCurve from subpop import cumulative # Set the number of", "(0, 1), where p is the number of covariates. We condition on all", "of \"centered\" (= x-0.5) applied to v (the Heaviside function is also known", "except FileExistsError: pass dir += '/' # Consider both the original ordering of", "import numpy as np from numpy.random import default_rng import os from hilbertcurve.hilbertcurve import", "cumulative( r, s, inds, majorticks, minorticks, filename=filename) # Save metrics in a text", "The files named \"randwalk####.pdf\" and \"randwalk####.txt\" use the same distribution of responses for", "x 1 vector of 0s and 1s whose entries are the responses for", "every filename # has the same number of characters for its length. max_digits", "f.write(f'{len(inds)}\\n') f.write('lenscale:\\n') f.write(f'{lenscale}\\n') f.write('Kuiper:\\n') f.write(f'{kuiper:.4}\\n') f.write('Kolmogorov-Smirnov:\\n') f.write(f'{kolmogorov_smirnov:.4}\\n') f.write('Kuiper / lenscale:\\n') f.write(f'{(kuiper / lenscale):.4}\\n')", "procedure. For the files whose names begin \"synth...\" or begin \"reverse...\", we set", "over the interval (0, 1), where p is the number of covariates. 
We", "as f: f.write('m:\\n') f.write(f'{len(s)}\\n') f.write('n:\\n') f.write(f'{len(inds)}\\n') f.write('lenscale:\\n') f.write(f'{lenscale}\\n') f.write('Kuiper:\\n') f.write(f'{kuiper:.4}\\n') f.write('Kolmogorov-Smirnov:\\n') f.write(f'{kolmogorov_smirnov:.4}\\n') f.write('Kuiper", "scores. perm = np.argsort(ints) x = x[perm, :] invperm = np.arange(len(perm)) invperm[perm] =", "collect together the covariates for all the members into a 1000 x p", "# Sort according to the scores. perm = np.argsort(ints) x = x[perm, :]", "= 1 # Pad with zeros the number in the filename so that", "Generate responses based on the random direction and membership # in the subpopulation.", "rng.permutation((m))[:n] # Generate data at random. x = rng.integers(2**precision - 1, size=(m, p),", "whose names begin \"randwalk...\", but consists of two separate stages for the files", "affiliates. This script creates a directory, \"unweighted\", in the working directory if the", "cumulative # Set the number of examples. m = 1000 # Set the", "direction and membership # in the subpopulation. centered = x.astype(np.float64) - 2**(precision -", "For the files whose names begin \"synth...\" or begin \"reverse...\", we set the", "creates many files there. The filenames are \"synth####.pdf\", \"synth####.txt\", \"reverse####.pdf\", \"reverse####.jpg\", \"randwalk####.pdf\", and", "\"centered\" (= x-0.5) applied to v (the Heaviside function is also known as", "ordering of covariates and the reverse ordering, # as well as a complete", "eps, eps) s = s.astype(np.float64) # Form a random direction. w = rng.standard_normal(size=(p))", "np.sort(inds) # Construct scores for plotting. imin = np.min(ints) imax = np.max(ints) s", "Ensure uniqueness even after roundoff errors. 
eps = np.finfo(np.float64).eps s = s +", "reverse: x = x[:, ::-1] # Perform the Hilbert mapping from p dimensions", "identically distributed draws from the uniform distribution over the interval (0, 1), where", "elif precision == 64: dtype = np.uint64 else: raise TypeError(f'There is no support", "the number of examples. m = 1000 # Set the size of the", "in the working directory if the directory does not already exist, then creates", "set the responses for all members of the subpopulation to 1, as the", "Generate data at random. x = rng.integers(2**precision - 1, size=(m, p), dtype=dtype) if", "s = (np.sort(ints) - imin) / (imax - imin) # Ensure uniqueness even", "== 8: dtype = np.uint8 elif precision == 16: dtype = np.uint16 elif", "elif diff: name = 'synth' else: name = 'randwalk' filename = dir +", "of this source tree. \"\"\" import math import numpy as np from numpy.random", "plotting. imin = np.min(ints) imax = np.max(ints) s = (np.sort(ints) - imin) /", "responses for all members of the subpopulation to 1, as the second stage", "for the subpopulation. for (reverse, diff) in [(False, True), (True, True), (False, False)]:", "function is also known as the unit step function, and takes the value", "dtype=dtype) if reverse: x = x[:, ::-1] # Perform the Hilbert mapping from", "the subpopulation. n = 100 # Set the number of bits in the", "subpop import cumulative # Set the number of examples. m = 1000 #", "subpopulation as for the full population. The data consists of a full population", "= np.uint8 elif precision == 16: dtype = np.uint16 elif precision == 32:", "the covariates in the reverse order from those named \"synth####.pdf\" and \"synth####.txt\". The", "4096. 
Each pdf file plots the cumulative differences between the subpopulation and the", "licensed under the MIT license found in the LICENSE file in the root", "takes the value 0 for negative arguments and the value 1 for positive", "for all members of the subpopulation to 1, as the second stage of", "random direction. w = rng.standard_normal(size=(p)) w /= np.linalg.norm(w, ord=2) if reverse: w =", "through the powers of 2 from 0002 to 4096. Each pdf file plots", "distribution, and finally then apply the Heaviside function to every entry of \"centered\"", "= (np.sort(ints) - imin) / (imax - imin) # Ensure uniqueness even after", "LICENSE file in the root directory of this source tree. \"\"\" import math", "p dimensions to one dimension. hc = HilbertCurve(precision, p) ints = hc.distances_from_points(x) assert", "\"synth####.txt\", \"reverse####.pdf\", \"reverse####.jpg\", \"randwalk####.pdf\", and \"randwalk####.txt\", where \"####\" ranges through the powers of", "we set the responses for all members of the subpopulation to 1, as", "according to the scores. perm = np.argsort(ints) x = x[perm, :] invperm =", "uniformly at random from the full population. Each member of the full population", "procedure, which consists of only a single stage for the files whose names", "n = 100 # Set the number of bits in the discretization (mantissa).", "same number of characters for its length. max_digits = math.ceil(pmax * math.log(2) /", "ord=2) if reverse: w = w[::-1] # Generate responses based on the random", "text file. filename = filename[:-4] + '.txt' with open(filename, 'w') as f: f.write('m:\\n')", "- imin) / (imax - imin) # Ensure uniqueness even after roundoff errors.", "64 # Determine the data type from precision. if precision == 8: dtype", "for the specified number of covariates. 
The corresponding txt files report metrics about", "whose names begin \"synth...\" or \"reverse...\"): we collect together the covariates for all", "and finally then apply the Heaviside function to every entry of \"centered\" (=", "number in the filename so that every filename # has the same number", "deviation in the responses # for the subpopulation. for (reverse, diff) in [(False,", "precision = {precision}.') # Create a directory as needed. dir = 'unweighted' try:", "Form a random direction. w = rng.standard_normal(size=(p)) w /= np.linalg.norm(w, ord=2) if reverse:", "is no support for precision = {precision}.') # Create a directory as needed.", "Consider both the original ordering of covariates and the reverse ordering, # as", "1 vector of 0s and 1s whose entries are the responses for the", "print(f'diff = {diff}') pmax = 12 # Set the number (p) of covariates.", "# Generate a random permutation for the indices of the subpopulation. inds =", "(imax - imin) # Ensure uniqueness even after roundoff errors. eps = np.finfo(np.float64).eps", "[2**k for k in range(1, pmax + 1)]: print(f'p = {p}') # Set", "np.arange(0, s.size * eps, eps) s = s.astype(np.float64) # Form a random direction.", "Inc. and affiliates. This script creates a directory, \"unweighted\", in the working directory", "the uniform distribution over the interval (0, 1), where p is the number", "as for the full population. The data consists of a full population of", "random from the full population. Each member of the full population consists of", "population. The data consists of a full population of 1,000 individual members and", "- 1, size=(m, p), dtype=dtype) if reverse: x = x[:, ::-1] # Perform", "the subpopulation. 
centered = x.astype(np.float64) - 2**(precision - 1) r = (np.sign(centered @", "kuiper, kolmogorov_smirnov, lenscale = cumulative( r, s, inds, majorticks, minorticks, filename=filename) # Save", "normal distribution, and finally then apply the Heaviside function to every entry of", "# Set up the random number generator. rng = default_rng(seed=543216789) # Generate a", "code is licensed under the MIT license found in the LICENSE file in", "from 0002 to 4096. Each pdf file plots the cumulative differences between the", "import cumulative # Set the number of examples. m = 1000 # Set", "number of examples. m = 1000 # Set the size of the subpopulation.", "toy examples. Copyright (c) Meta Platforms, Inc. and affiliates. This script creates a", "::-1] # Perform the Hilbert mapping from p dimensions to one dimension. hc", "filename = filename[:-4] + '.txt' with open(filename, 'w') as f: f.write('m:\\n') f.write(f'{len(s)}\\n') f.write('n:\\n')", "np.arange(len(perm)) inds = invperm[inds] inds = np.sort(inds) # Construct scores for plotting. imin", "imax = np.max(ints) s = (np.sort(ints) - imin) / (imax - imin) #", "of covariates. We condition on all the covariates. We generate the responses via", "zeros the number in the filename so that every filename # has the", "indices of the subpopulation. inds = rng.permutation((m))[:n] # Generate data at random. x", "and \"randwalk####.txt\" use the same distribution of responses for the subpopulation as for", "\"####\" ranges through the powers of 2 from 0002 to 4096. Each pdf", "Hilbert mapping from p dimensions to one dimension. 
hc = HilbertCurve(precision, p) ints", "with open(filename, 'w') as f: f.write('m:\\n') f.write(f'{len(s)}\\n') f.write('n:\\n') f.write(f'{len(inds)}\\n') f.write('lenscale:\\n') f.write(f'{lenscale}\\n') f.write('Kuiper:\\n') f.write(f'{kuiper:.4}\\n')", "applied to v (the Heaviside function is also known as the unit step", "precision == 32: dtype = np.uint32 elif precision == 64: dtype = np.uint64", "Set the number (p) of covariates. for p in [2**k for k in", "We condition on all the covariates. We generate the responses via the following", "Pad with zeros the number in the filename so that every filename #", "of the subpopulation. n = 100 # Set the number of bits in", "Meta Platforms, Inc. and affiliates. This script creates a directory, \"unweighted\", in the", "\"synth...\" or \"reverse...\"): we collect together the covariates for all the members into", "1000 # Set the size of the subpopulation. n = 100 # Set", "in the reverse order from those named \"synth####.pdf\" and \"synth####.txt\". The files named", "in [2**k for k in range(1, pmax + 1)]: print(f'p = {p}') #", "permutation for the indices of the subpopulation. inds = rng.permutation((m))[:n] # Generate data", "at random. x = rng.integers(2**precision - 1, size=(m, p), dtype=dtype) if reverse: x", "p), dtype=dtype) if reverse: x = x[:, ::-1] # Perform the Hilbert mapping", "covariates. We condition on all the covariates. We generate the responses via the", "and the reverse ordering, # as well as a complete lack of significant", "print(f'p = {p}') # Set up the random number generator. rng = default_rng(seed=543216789)", "= x[:, ::-1] # Perform the Hilbert mapping from p dimensions to one", "1) / 2 if diff: r[inds] = 1 # Pad with zeros the", "full population. Each member of the full population consists of p independent and", "the corresponding members of the full population. That concludes the first stage of", "Determine the data type from precision. 
if precision == 8: dtype = np.uint8", "and diff: name = 'reverse' elif diff: name = 'synth' else: name =", "#!/usr/bin/env python3 \"\"\" Plot the subpopulation deviations for a range of synthetic toy", "members and a subpopulation of 100 subselected uniformly at random from the full", "every entry of \"centered\" (= x-0.5) applied to v (the Heaviside function is", "from those named \"synth####.pdf\" and \"synth####.txt\". The files named \"randwalk####.pdf\" and \"randwalk####.txt\" use", "= {p}') # Set up the random number generator. rng = default_rng(seed=543216789) #", "to 4096. Each pdf file plots the cumulative differences between the subpopulation and", "= default_rng(seed=543216789) # Generate a random permutation for the indices of the subpopulation.", "in range(1, pmax + 1)]: print(f'p = {p}') # Set up the random", "imin) / (imax - imin) # Ensure uniqueness even after roundoff errors. eps", "the responses for all members of the subpopulation to 1, as the second", "function to every entry of \"centered\" (= x-0.5) applied to v (the Heaviside", "= rng.permutation((m))[:n] # Generate data at random. x = rng.integers(2**precision - 1, size=(m,", "the random number generator. rng = default_rng(seed=543216789) # Generate a random permutation for", "the Hilbert mapping from p dimensions to one dimension. hc = HilbertCurve(precision, p)", "in the filename so that every filename # has the same number of", "the first stage of the procedure. For the files whose names begin \"synth...\"", "to the scores. perm = np.argsort(ints) x = x[perm, :] invperm = np.arange(len(perm))", "to one dimension. hc = HilbertCurve(precision, p) ints = hc.distances_from_points(x) assert np.unique(ints).size ==", "m = 1000 # Set the size of the subpopulation. n = 100", "if reverse: w = w[::-1] # Generate responses based on the random direction", "uniqueness even after roundoff errors. 
eps = np.finfo(np.float64).eps s = s + np.arange(0,", "the reverse ordering, # as well as a complete lack of significant deviation", "and takes the value 0 for negative arguments and the value 1 for", "/ 2 if diff: r[inds] = 1 # Pad with zeros the number", "entries are the responses for the corresponding members of the full population. That", "procedure. This source code is licensed under the MIT license found in the", "1, size=(m, p), dtype=dtype) if reverse: x = x[:, ::-1] # Perform the", "x, construct the p x 1 vector v whose entries are independent and", "up the random number generator. rng = default_rng(seed=543216789) # Generate a random permutation", "no support for precision = {precision}.') # Create a directory as needed. dir", "random permutation for the indices of the subpopulation. inds = rng.permutation((m))[:n] # Generate", "data at random. x = rng.integers(2**precision - 1, size=(m, p), dtype=dtype) if reverse:", "w[::-1] # Generate responses based on the random direction and membership # in", "are the responses for the corresponding members of the full population. That concludes", "w /= np.linalg.norm(w, ord=2) if reverse: w = w[::-1] # Generate responses based", "entry of \"centered\" (= x-0.5) applied to v (the Heaviside function is also", "f.write('m:\\n') f.write(f'{len(s)}\\n') f.write('n:\\n') f.write(f'{len(inds)}\\n') f.write('lenscale:\\n') f.write(f'{lenscale}\\n') f.write('Kuiper:\\n') f.write(f'{kuiper:.4}\\n') f.write('Kolmogorov-Smirnov:\\n') f.write(f'{kolmogorov_smirnov:.4}\\n') f.write('Kuiper / lenscale:\\n')", "and \"reverse####.txt\" condition on the covariates in the reverse order from those named", "= np.arange(len(perm)) inds = invperm[inds] inds = np.sort(inds) # Construct scores for plotting.", "with zeros the number in the filename so that every filename # has", "inds = rng.permutation((m))[:n] # Generate data at random. 
x = rng.integers(2**precision - 1,", "The result is a 1000 x 1 vector of 0s and 1s whose", "exist, then creates many files there. The filenames are \"synth####.pdf\", \"synth####.txt\", \"reverse####.pdf\", \"reverse####.jpg\",", "reverse order from those named \"synth####.pdf\" and \"synth####.txt\". The files named \"randwalk####.pdf\" and", "= rng.standard_normal(size=(p)) w /= np.linalg.norm(w, ord=2) if reverse: w = w[::-1] # Generate", "f.write('lenscale:\\n') f.write(f'{lenscale}\\n') f.write('Kuiper:\\n') f.write(f'{kuiper:.4}\\n') f.write('Kolmogorov-Smirnov:\\n') f.write(f'{kolmogorov_smirnov:.4}\\n') f.write('Kuiper / lenscale:\\n') f.write(f'{(kuiper / lenscale):.4}\\n') f.write('Kolmogorov-Smirnov", "responses for the subpopulation as for the full population. The data consists of", "data type from precision. if precision == 8: dtype = np.uint8 elif precision", "the subpopulation. for (reverse, diff) in [(False, True), (True, True), (False, False)]: print(f'reverse", "imin) # Ensure uniqueness even after roundoff errors. eps = np.finfo(np.float64).eps s =", "begin \"randwalk...\", but consists of two separate stages for the files whose names", "print(f'reverse = {reverse}') print(f'diff = {diff}') pmax = 12 # Set the number", "a random direction. w = rng.standard_normal(size=(p)) w /= np.linalg.norm(w, ord=2) if reverse: w", "subpopulation. for (reverse, diff) in [(False, True), (True, True), (False, False)]: print(f'reverse =", "Perform the Hilbert mapping from p dimensions to one dimension. hc = HilbertCurve(precision,", "as the unit step function, and takes the value 0 for negative arguments", "elif precision == 16: dtype = np.uint16 elif precision == 32: dtype =", "= np.uint16 elif precision == 32: dtype = np.uint32 elif precision == 64:", "the second stage of the procedure. 
This source code is licensed under the", "precision == 8: dtype = np.uint8 elif precision == 16: dtype = np.uint16", "covariates in the reverse order from those named \"synth####.pdf\" and \"synth####.txt\". The files", "\"reverse####.pdf\" and \"reverse####.txt\" condition on the covariates in the reverse order from those", "import math import numpy as np from numpy.random import default_rng import os from", "the graph of cumulative differences. majorticks = 10 minorticks = 100 kuiper, kolmogorov_smirnov,", "= math.ceil(pmax * math.log(2) / math.log(10)) if reverse and diff: name = 'reverse'", "10 minorticks = 100 kuiper, kolmogorov_smirnov, lenscale = cumulative( r, s, inds, majorticks,", "for plotting. imin = np.min(ints) imax = np.max(ints) s = (np.sort(ints) - imin)", "positive arguments). The result is a 1000 x 1 vector of 0s and", "the discretization (mantissa). precision = 64 # Determine the data type from precision.", "minorticks, filename=filename) # Save metrics in a text file. filename = filename[:-4] +", "True), (True, True), (False, False)]: print(f'reverse = {reverse}') print(f'diff = {diff}') pmax =", "script creates a directory, \"unweighted\", in the working directory if the directory does", "(p) of covariates. for p in [2**k for k in range(1, pmax +", "which consists of only a single stage for the files whose names begin", "# Generate data at random. 
x = rng.integers(2**precision - 1, size=(m, p), dtype=dtype)", "are \"synth####.pdf\", \"synth####.txt\", \"reverse####.pdf\", \"reverse####.jpg\", \"randwalk####.pdf\", and \"randwalk####.txt\", where \"####\" ranges through the", "of two separate stages for the files whose names begin \"synth...\" or \"reverse...\"):", "the files whose names begin \"synth...\" or \"reverse...\"): we collect together the covariates", "the filename so that every filename # has the same number of characters", "False)]: print(f'reverse = {reverse}') print(f'diff = {diff}') pmax = 12 # Set the", "reverse and diff: name = 'reverse' elif diff: name = 'synth' else: name", "covariates and the reverse ordering, # as well as a complete lack of", "independent and identically distributed draws from the standard normal distribution, and finally then", "s, inds, majorticks, minorticks, filename=filename) # Save metrics in a text file. filename", "bits in the discretization (mantissa). precision = 64 # Determine the data type", "roundoff errors. eps = np.finfo(np.float64).eps s = s + np.arange(0, s.size * eps,", "default_rng(seed=543216789) # Generate a random permutation for the indices of the subpopulation. inds", "hc.distances_from_points(x) assert np.unique(ints).size == x.shape[0] # Sort according to the scores. perm =", "np.max(ints) s = (np.sort(ints) - imin) / (imax - imin) # Ensure uniqueness", "metrics in a text file. filename = filename[:-4] + '.txt' with open(filename, 'w')", "The filenames are \"synth####.pdf\", \"synth####.txt\", \"reverse####.pdf\", \"reverse####.jpg\", \"randwalk####.pdf\", and \"randwalk####.txt\", where \"####\" ranges", "= 'randwalk' filename = dir + name + str(p).zfill(max_digits) + '.pdf' # Construct", "majorticks, minorticks, filename=filename) # Save metrics in a text file. filename = filename[:-4]", "the MIT license found in the LICENSE file in the root directory of", "and \"synth####.txt\". 
The files named \"randwalk####.pdf\" and \"randwalk####.txt\" use the same distribution of", "for precision = {precision}.') # Create a directory as needed. dir = 'unweighted'", "about the plots. The files named \"reverse####.pdf\" and \"reverse####.txt\" condition on the covariates", "HilbertCurve from subpop import cumulative # Set the number of examples. m =", "[(False, True), (True, True), (False, False)]: print(f'reverse = {reverse}') print(f'diff = {diff}') pmax", "for the indices of the subpopulation. inds = rng.permutation((m))[:n] # Generate data at", "'.pdf' # Construct the graph of cumulative differences. majorticks = 10 minorticks =", "the directory does not already exist, then creates many files there. The filenames", "working directory if the directory does not already exist, then creates many files", "graph of cumulative differences. majorticks = 10 minorticks = 100 kuiper, kolmogorov_smirnov, lenscale", "directory does not already exist, then creates many files there. The filenames are", "64: dtype = np.uint64 else: raise TypeError(f'There is no support for precision =", "separate stages for the files whose names begin \"synth...\" or \"reverse...\"): we collect", "\"randwalk####.txt\", where \"####\" ranges through the powers of 2 from 0002 to 4096.", "a range of synthetic toy examples. Copyright (c) Meta Platforms, Inc. and affiliates.", "size of the subpopulation. n = 100 # Set the number of bits", "p matrix x, construct the p x 1 vector v whose entries are", "1), where p is the number of covariates. We condition on all the", "individual members and a subpopulation of 100 subselected uniformly at random from the", "all the covariates. We generate the responses via the following procedure, which consists", "the scores. 
perm = np.argsort(ints) x = x[perm, :] invperm = np.arange(len(perm)) invperm[perm]", "of p independent and identically distributed draws from the uniform distribution over the", "100 kuiper, kolmogorov_smirnov, lenscale = cumulative( r, s, inds, majorticks, minorticks, filename=filename) #", "open(filename, 'w') as f: f.write('m:\\n') f.write(f'{len(s)}\\n') f.write('n:\\n') f.write(f'{len(inds)}\\n') f.write('lenscale:\\n') f.write(f'{lenscale}\\n') f.write('Kuiper:\\n') f.write(f'{kuiper:.4}\\n') f.write('Kolmogorov-Smirnov:\\n')", "apply the Heaviside function to every entry of \"centered\" (= x-0.5) applied to", "range(1, pmax + 1)]: print(f'p = {p}') # Set up the random number", "examples. m = 1000 # Set the size of the subpopulation. n =", "the files whose names begin \"randwalk...\", but consists of two separate stages for", "for its length. max_digits = math.ceil(pmax * math.log(2) / math.log(10)) if reverse and", "np.uint8 elif precision == 16: dtype = np.uint16 elif precision == 32: dtype", "not already exist, then creates many files there. The filenames are \"synth####.pdf\", \"synth####.txt\",", "the responses for the corresponding members of the full population. That concludes the", "= np.sort(inds) # Construct scores for plotting. imin = np.min(ints) imax = np.max(ints)", "under the MIT license found in the LICENSE file in the root directory", "value 1 for positive arguments). The result is a 1000 x 1 vector", "(np.sort(ints) - imin) / (imax - imin) # Ensure uniqueness even after roundoff", "and a subpopulation of 100 subselected uniformly at random from the full population.", "standard normal distribution, and finally then apply the Heaviside function to every entry", "x.astype(np.float64) - 2**(precision - 1) r = (np.sign(centered @ w) + 1) /", "the value 1 for positive arguments). 
The result is a 1000 x 1", "import default_rng import os from hilbertcurve.hilbertcurve import HilbertCurve from subpop import cumulative #", "\"randwalk####.pdf\", and \"randwalk####.txt\", where \"####\" ranges through the powers of 2 from 0002", "eps = np.finfo(np.float64).eps s = s + np.arange(0, s.size * eps, eps) s", "+ np.arange(0, s.size * eps, eps) s = s.astype(np.float64) # Form a random", "first stage of the procedure. For the files whose names begin \"synth...\" or", "powers of 2 from 0002 to 4096. Each pdf file plots the cumulative", "same distribution of responses for the subpopulation as for the full population. The", "result is a 1000 x 1 vector of 0s and 1s whose entries", "directory, \"unweighted\", in the working directory if the directory does not already exist,", "corresponding members of the full population. That concludes the first stage of the", "the number in the filename so that every filename # has the same", "- 2**(precision - 1) r = (np.sign(centered @ w) + 1) / 2", "the unit step function, and takes the value 0 for negative arguments and", "uniform distribution over the interval (0, 1), where p is the number of", "v (the Heaviside function is also known as the unit step function, and", "the value 0 for negative arguments and the value 1 for positive arguments).", "in [(False, True), (True, True), (False, False)]: print(f'reverse = {reverse}') print(f'diff = {diff}')", "data consists of a full population of 1,000 individual members and a subpopulation", "the standard normal distribution, and finally then apply the Heaviside function to every", "names begin \"randwalk...\", but consists of two separate stages for the files whose", "synthetic toy examples. Copyright (c) Meta Platforms, Inc. and affiliates. This script creates", "That concludes the first stage of the procedure. For the files whose names", "of synthetic toy examples. Copyright (c) Meta Platforms, Inc. and affiliates. 
This script", "100 # Set the number of bits in the discretization (mantissa). precision =", "diff: name = 'reverse' elif diff: name = 'synth' else: name = 'randwalk'", "ranges through the powers of 2 from 0002 to 4096. Each pdf file", "deviations for a range of synthetic toy examples. Copyright (c) Meta Platforms, Inc.", "diff) in [(False, True), (True, True), (False, False)]: print(f'reverse = {reverse}') print(f'diff =", "condition on the covariates in the reverse order from those named \"synth####.pdf\" and", "= s + np.arange(0, s.size * eps, eps) s = s.astype(np.float64) # Form", "= np.argsort(ints) x = x[perm, :] invperm = np.arange(len(perm)) invperm[perm] = np.arange(len(perm)) inds", "finally then apply the Heaviside function to every entry of \"centered\" (= x-0.5)", "cumulative differences between the subpopulation and the full population, controlling for the specified", "r[inds] = 1 # Pad with zeros the number in the filename so", "the subpopulation and the full population, controlling for the specified number of covariates.", "1, as the second stage of the procedure. This source code is licensed", "subpopulation of 100 subselected uniformly at random from the full population. Each member", "centered = x.astype(np.float64) - 2**(precision - 1) r = (np.sign(centered @ w) +", "filename = dir + name + str(p).zfill(max_digits) + '.pdf' # Construct the graph", "hilbertcurve.hilbertcurve import HilbertCurve from subpop import cumulative # Set the number of examples.", "for k in range(1, pmax + 1)]: print(f'p = {p}') # Set up", "generator. rng = default_rng(seed=543216789) # Generate a random permutation for the indices of", "errors. eps = np.finfo(np.float64).eps s = s + np.arange(0, s.size * eps, eps)", "filename so that every filename # has the same number of characters for", "(the Heaviside function is also known as the unit step function, and takes", "on all the covariates. 
We generate the responses via the following procedure, which", "consists of two separate stages for the files whose names begin \"synth...\" or", "1,000 individual members and a subpopulation of 100 subselected uniformly at random from", "files whose names begin \"randwalk...\", but consists of two separate stages for the", "= np.arange(len(perm)) invperm[perm] = np.arange(len(perm)) inds = invperm[inds] inds = np.sort(inds) # Construct", "named \"reverse####.pdf\" and \"reverse####.txt\" condition on the covariates in the reverse order from", "+ 1)]: print(f'p = {p}') # Set up the random number generator. rng", "np.arange(len(perm)) invperm[perm] = np.arange(len(perm)) inds = invperm[inds] inds = np.sort(inds) # Construct scores", "the members into a 1000 x p matrix x, construct the p x", "the procedure. For the files whose names begin \"synth...\" or begin \"reverse...\", we", "rng = default_rng(seed=543216789) # Generate a random permutation for the indices of the", "for negative arguments and the value 1 for positive arguments). The result is", "of the full population. That concludes the first stage of the procedure. For", "filename # has the same number of characters for its length. max_digits =", "max_digits = math.ceil(pmax * math.log(2) / math.log(10)) if reverse and diff: name =", "8: dtype = np.uint8 elif precision == 16: dtype = np.uint16 elif precision", "The corresponding txt files report metrics about the plots. The files named \"reverse####.pdf\"", "= cumulative( r, s, inds, majorticks, minorticks, filename=filename) # Save metrics in a", "x[perm, :] invperm = np.arange(len(perm)) invperm[perm] = np.arange(len(perm)) inds = invperm[inds] inds =", "the powers of 2 from 0002 to 4096. Each pdf file plots the", "s.size * eps, eps) s = s.astype(np.float64) # Form a random direction. w", "invperm = np.arange(len(perm)) invperm[perm] = np.arange(len(perm)) inds = invperm[inds] inds = np.sort(inds) #", "the full population. 
That concludes the first stage of the procedure. For the", "p independent and identically distributed draws from the uniform distribution over the interval", "from precision. if precision == 8: dtype = np.uint8 elif precision == 16:", "# Ensure uniqueness even after roundoff errors. eps = np.finfo(np.float64).eps s = s", "math.log(2) / math.log(10)) if reverse and diff: name = 'reverse' elif diff: name", "in the LICENSE file in the root directory of this source tree. \"\"\"", "(c) Meta Platforms, Inc. and affiliates. This script creates a directory, \"unweighted\", in", "membership # in the subpopulation. centered = x.astype(np.float64) - 2**(precision - 1) r", "also known as the unit step function, and takes the value 0 for", "numpy as np from numpy.random import default_rng import os from hilbertcurve.hilbertcurve import HilbertCurve", "vector of 0s and 1s whose entries are the responses for the corresponding", "rng.standard_normal(size=(p)) w /= np.linalg.norm(w, ord=2) if reverse: w = w[::-1] # Generate responses", "all members of the subpopulation to 1, as the second stage of the", "x = x[perm, :] invperm = np.arange(len(perm)) invperm[perm] = np.arange(len(perm)) inds = invperm[inds]", "= filename[:-4] + '.txt' with open(filename, 'w') as f: f.write('m:\\n') f.write(f'{len(s)}\\n') f.write('n:\\n') f.write(f'{len(inds)}\\n')", "number of covariates. The corresponding txt files report metrics about the plots. The", "on the covariates in the reverse order from those named \"synth####.pdf\" and \"synth####.txt\".", "does not already exist, then creates many files there. The filenames are \"synth####.pdf\",", "the original ordering of covariates and the reverse ordering, # as well as", "whose names begin \"synth...\" or begin \"reverse...\", we set the responses for all", "of the procedure. 
This source code is licensed under the MIT license found", "f.write('Kuiper:\\n') f.write(f'{kuiper:.4}\\n') f.write('Kolmogorov-Smirnov:\\n') f.write(f'{kolmogorov_smirnov:.4}\\n') f.write('Kuiper / lenscale:\\n') f.write(f'{(kuiper / lenscale):.4}\\n') f.write('Kolmogorov-Smirnov / lenscale:\\n')", "\"randwalk####.txt\" use the same distribution of responses for the subpopulation as for the", "Heaviside function is also known as the unit step function, and takes the", "number of bits in the discretization (mantissa). precision = 64 # Determine the", "pass dir += '/' # Consider both the original ordering of covariates and", "= {reverse}') print(f'diff = {diff}') pmax = 12 # Set the number (p)", "the subpopulation. inds = rng.permutation((m))[:n] # Generate data at random. x = rng.integers(2**precision", "Sort according to the scores. perm = np.argsort(ints) x = x[perm, :] invperm", "# Form a random direction. w = rng.standard_normal(size=(p)) w /= np.linalg.norm(w, ord=2) if", "if diff: r[inds] = 1 # Pad with zeros the number in the", "the covariates for all the members into a 1000 x p matrix x,", "of covariates. for p in [2**k for k in range(1, pmax + 1)]:", "interval (0, 1), where p is the number of covariates. We condition on", "whose entries are independent and identically distributed draws from the standard normal distribution,", "stage of the procedure. This source code is licensed under the MIT license", "files named \"randwalk####.pdf\" and \"randwalk####.txt\" use the same distribution of responses for the", "\"\"\" import math import numpy as np from numpy.random import default_rng import os", "\"randwalk####.pdf\" and \"randwalk####.txt\" use the same distribution of responses for the subpopulation as", "specified number of covariates. The corresponding txt files report metrics about the plots.", "for positive arguments). 
The result is a 1000 x 1 vector of 0s", "of 0s and 1s whose entries are the responses for the corresponding members", "'reverse' elif diff: name = 'synth' else: name = 'randwalk' filename = dir", "full population consists of p independent and identically distributed draws from the uniform", "a 1000 x p matrix x, construct the p x 1 vector v", "its length. max_digits = math.ceil(pmax * math.log(2) / math.log(10)) if reverse and diff:", "= 64 # Determine the data type from precision. if precision == 8:", "complete lack of significant deviation in the responses # for the subpopulation. for", "as a complete lack of significant deviation in the responses # for the", "np.unique(ints).size == x.shape[0] # Sort according to the scores. perm = np.argsort(ints) x", "np.uint16 elif precision == 32: dtype = np.uint32 elif precision == 64: dtype", "and identically distributed draws from the uniform distribution over the interval (0, 1),", "range of synthetic toy examples. Copyright (c) Meta Platforms, Inc. and affiliates. This", "and 1s whose entries are the responses for the corresponding members of the", "where p is the number of covariates. We condition on all the covariates.", "the reverse order from those named \"synth####.pdf\" and \"synth####.txt\". 
The files named \"randwalk####.pdf\"", "elif precision == 32: dtype = np.uint32 elif precision == 64: dtype =", "and identically distributed draws from the standard normal distribution, and finally then apply", "\"randwalk...\", but consists of two separate stages for the files whose names begin", "0s and 1s whose entries are the responses for the corresponding members of", "'unweighted' try: os.mkdir(dir) except FileExistsError: pass dir += '/' # Consider both the", "or \"reverse...\"): we collect together the covariates for all the members into a", "= dir + name + str(p).zfill(max_digits) + '.pdf' # Construct the graph of", "unit step function, and takes the value 0 for negative arguments and the", "population consists of p independent and identically distributed draws from the uniform distribution", "of bits in the discretization (mantissa). precision = 64 # Determine the data", "following procedure, which consists of only a single stage for the files whose", "if the directory does not already exist, then creates many files there. The", "named \"randwalk####.pdf\" and \"randwalk####.txt\" use the same distribution of responses for the subpopulation", "is licensed under the MIT license found in the LICENSE file in the", "= {precision}.') # Create a directory as needed. dir = 'unweighted' try: os.mkdir(dir)", "reverse: w = w[::-1] # Generate responses based on the random direction and", "as np from numpy.random import default_rng import os from hilbertcurve.hilbertcurve import HilbertCurve from", "responses via the following procedure, which consists of only a single stage for", "file plots the cumulative differences between the subpopulation and the full population, controlling", "differences. 
majorticks = 10 minorticks = 100 kuiper, kolmogorov_smirnov, lenscale = cumulative( r,", "p) ints = hc.distances_from_points(x) assert np.unique(ints).size == x.shape[0] # Sort according to the", "= 'unweighted' try: os.mkdir(dir) except FileExistsError: pass dir += '/' # Consider both", "matrix x, construct the p x 1 vector v whose entries are independent", "step function, and takes the value 0 for negative arguments and the value", "we collect together the covariates for all the members into a 1000 x", "entries are independent and identically distributed draws from the standard normal distribution, and", "and \"randwalk####.txt\", where \"####\" ranges through the powers of 2 from 0002 to", "w = rng.standard_normal(size=(p)) w /= np.linalg.norm(w, ord=2) if reverse: w = w[::-1] #", "into a 1000 x p matrix x, construct the p x 1 vector", "a directory, \"unweighted\", in the working directory if the directory does not already", "files named \"reverse####.pdf\" and \"reverse####.txt\" condition on the covariates in the reverse order", "source code is licensed under the MIT license found in the LICENSE file", "Heaviside function to every entry of \"centered\" (= x-0.5) applied to v (the", "directory if the directory does not already exist, then creates many files there.", "from the standard normal distribution, and finally then apply the Heaviside function to", "covariates. The corresponding txt files report metrics about the plots. The files named", "the number of covariates. We condition on all the covariates. We generate the", "np.min(ints) imax = np.max(ints) s = (np.sort(ints) - imin) / (imax - imin)", "np.finfo(np.float64).eps s = s + np.arange(0, s.size * eps, eps) s = s.astype(np.float64)", "1 # Pad with zeros the number in the filename so that every", "subselected uniformly at random from the full population. Each member of the full", "p is the number of covariates. We condition on all the covariates. 
We", "precision == 16: dtype = np.uint16 elif precision == 32: dtype = np.uint32", "Construct scores for plotting. imin = np.min(ints) imax = np.max(ints) s = (np.sort(ints)", "members of the subpopulation to 1, as the second stage of the procedure.", "identically distributed draws from the standard normal distribution, and finally then apply the", "precision == 64: dtype = np.uint64 else: raise TypeError(f'There is no support for", "assert np.unique(ints).size == x.shape[0] # Sort according to the scores. perm = np.argsort(ints)", "files there. The filenames are \"synth####.pdf\", \"synth####.txt\", \"reverse####.pdf\", \"reverse####.jpg\", \"randwalk####.pdf\", and \"randwalk####.txt\", where" ]
[ "Test RemoteAgent Unit \"\"\" import pytest import requests import secrets from starfish.agent.remote_agent import", "assert(agent.http_client) new_client = object() agent.http_client = new_client assert(agent.http_client) assert(isinstance(agent.http_client, object)) def test_remote_agent_get_adapter(): ddo", "in range(10, secrets.randbelow(60)): name = f'name_{index}' filter[name] = metadata[name] assert(RemoteAgent.is_metadata_match(filter, metadata)) filter['new_value'] =", "ddo = DDO.create('http://localhost:3030') agent = RemoteAgent(ddo) assert(agent.adapter) assert(isinstance(agent.adapter, RemoteAgentAdapter)) def test_remote_agent_get_meta_list(): ddo =", "secrets.token_hex(64) filter = {} for index in range(10, secrets.randbelow(60)): name = f'name_{index}' filter[name]", "ddo = DDO.create('http://localhost') agent = RemoteAgent(ddo) with pytest.raises(StarfishConnectionError): result = agent.get_metadata_list() def test_remote_agent_is_metadata_match():", "for index in range(10, secrets.randbelow(60)): name = f'name_{index}' filter[name] = metadata[name] assert(RemoteAgent.is_metadata_match(filter, metadata))", "object)) def test_remote_agent_get_adapter(): ddo = DDO.create('http://localhost:3030') agent = RemoteAgent(ddo) assert(agent.adapter) assert(isinstance(agent.adapter, RemoteAgentAdapter)) def", "{ 'name': 'test' } metadata = { 'name': 'test', 'more_data': 'test_data' } assert(RemoteAgent.is_metadata_match(filter,", "def test_remote_agent_get_adapter(): ddo = DDO.create('http://localhost:3030') agent = RemoteAgent(ddo) assert(agent.adapter) assert(isinstance(agent.adapter, RemoteAgentAdapter)) def test_remote_agent_get_meta_list():", "starfish.network.ddo import DDO def test_remote_agent_set_http_client(): ddo = DDO.create('http://localhost:3030') agent = RemoteAgent(ddo) assert(agent.http_client) new_client", "= DDO.create('http://localhost:3030') agent = RemoteAgent(ddo) assert(agent.http_client) new_client = object() agent.http_client = 
new_client assert(agent.http_client)", "= RemoteAgent(ddo) with pytest.raises(StarfishConnectionError): result = agent.get_metadata_list() def test_remote_agent_is_metadata_match(): filter = { 'name':", "assert(agent.http_client) assert(isinstance(agent.http_client, object)) def test_remote_agent_get_adapter(): ddo = DDO.create('http://localhost:3030') agent = RemoteAgent(ddo) assert(agent.adapter) assert(isinstance(agent.adapter,", "with pytest.raises(StarfishConnectionError): result = agent.get_metadata_list() def test_remote_agent_is_metadata_match(): filter = { 'name': 'test' }", "= new_client assert(agent.http_client) assert(isinstance(agent.http_client, object)) def test_remote_agent_get_adapter(): ddo = DDO.create('http://localhost:3030') agent = RemoteAgent(ddo)", "index in range(10, secrets.randbelow(60)): name = f'name_{index}' filter[name] = metadata[name] assert(RemoteAgent.is_metadata_match(filter, metadata)) filter['new_value']", "= secrets.token_hex(64) filter = {} for index in range(10, secrets.randbelow(60)): name = f'name_{index}'", "f'name_{index}' filter[name] = metadata[name] assert(RemoteAgent.is_metadata_match(filter, metadata)) filter['new_value'] = 'cannot match' assert(not RemoteAgent.is_metadata_match(filter, metadata))", "from starfish.exceptions import StarfishConnectionError from starfish.middleware.agent.remote_agent_adapter import RemoteAgentAdapter from starfish.network.ddo import DDO def", "test_remote_agent_get_adapter(): ddo = DDO.create('http://localhost:3030') agent = RemoteAgent(ddo) assert(agent.adapter) assert(isinstance(agent.adapter, RemoteAgentAdapter)) def test_remote_agent_get_meta_list(): ddo", "\"\"\" import pytest import requests import secrets from starfish.agent.remote_agent import RemoteAgent from starfish.exceptions", "= {} for index in range(0, 100): name = f'name_{index}' metadata[name] = secrets.token_hex(64)", "= {} for index in range(10, secrets.randbelow(60)): name = f'name_{index}' filter[name] = 
metadata[name]", "= f'name_{index}' metadata[name] = secrets.token_hex(64) filter = {} for index in range(10, secrets.randbelow(60)):", "RemoteAgent(ddo) assert(agent.adapter) assert(isinstance(agent.adapter, RemoteAgentAdapter)) def test_remote_agent_get_meta_list(): ddo = DDO.create('http://localhost') agent = RemoteAgent(ddo) with", "name = f'name_{index}' metadata[name] = secrets.token_hex(64) filter = {} for index in range(10,", "<filename>tests/unit/agent/test_remote_agent.py \"\"\" Test RemoteAgent Unit \"\"\" import pytest import requests import secrets from", "DDO.create('http://localhost:3030') agent = RemoteAgent(ddo) assert(agent.http_client) new_client = object() agent.http_client = new_client assert(agent.http_client) assert(isinstance(agent.http_client,", "starfish.middleware.agent.remote_agent_adapter import RemoteAgentAdapter from starfish.network.ddo import DDO def test_remote_agent_set_http_client(): ddo = DDO.create('http://localhost:3030') agent", "secrets from starfish.agent.remote_agent import RemoteAgent from starfish.exceptions import StarfishConnectionError from starfish.middleware.agent.remote_agent_adapter import RemoteAgentAdapter", "RemoteAgent from starfish.exceptions import StarfishConnectionError from starfish.middleware.agent.remote_agent_adapter import RemoteAgentAdapter from starfish.network.ddo import DDO", "range(10, secrets.randbelow(60)): name = f'name_{index}' filter[name] = metadata[name] assert(RemoteAgent.is_metadata_match(filter, metadata)) filter['new_value'] = 'cannot", "secrets.randbelow(60)): name = f'name_{index}' filter[name] = metadata[name] assert(RemoteAgent.is_metadata_match(filter, metadata)) filter['new_value'] = 'cannot match'", "starfish.agent.remote_agent import RemoteAgent from starfish.exceptions import StarfishConnectionError from starfish.middleware.agent.remote_agent_adapter import RemoteAgentAdapter from starfish.network.ddo", "agent = RemoteAgent(ddo) with pytest.raises(StarfishConnectionError): 
result = agent.get_metadata_list() def test_remote_agent_is_metadata_match(): filter = {", "new_client assert(agent.http_client) assert(isinstance(agent.http_client, object)) def test_remote_agent_get_adapter(): ddo = DDO.create('http://localhost:3030') agent = RemoteAgent(ddo) assert(agent.adapter)", "pytest.raises(StarfishConnectionError): result = agent.get_metadata_list() def test_remote_agent_is_metadata_match(): filter = { 'name': 'test' } metadata", "import requests import secrets from starfish.agent.remote_agent import RemoteAgent from starfish.exceptions import StarfishConnectionError from", "Unit \"\"\" import pytest import requests import secrets from starfish.agent.remote_agent import RemoteAgent from", "assert(isinstance(agent.adapter, RemoteAgentAdapter)) def test_remote_agent_get_meta_list(): ddo = DDO.create('http://localhost') agent = RemoteAgent(ddo) with pytest.raises(StarfishConnectionError): result", "from starfish.network.ddo import DDO def test_remote_agent_set_http_client(): ddo = DDO.create('http://localhost:3030') agent = RemoteAgent(ddo) assert(agent.http_client)", "import secrets from starfish.agent.remote_agent import RemoteAgent from starfish.exceptions import StarfishConnectionError from starfish.middleware.agent.remote_agent_adapter import", "'name': 'test', 'more_data': 'test_data' } assert(RemoteAgent.is_metadata_match(filter, metadata)) metadata = {} for index in", "RemoteAgent(ddo) with pytest.raises(StarfishConnectionError): result = agent.get_metadata_list() def test_remote_agent_is_metadata_match(): filter = { 'name': 'test'", "= agent.get_metadata_list() def test_remote_agent_is_metadata_match(): filter = { 'name': 'test' } metadata = {", "f'name_{index}' metadata[name] = secrets.token_hex(64) filter = {} for index in range(10, secrets.randbelow(60)): name", "def test_remote_agent_is_metadata_match(): filter = { 'name': 'test' } metadata = { 'name': 'test',", "object() agent.http_client = new_client assert(agent.http_client) 
assert(isinstance(agent.http_client, object)) def test_remote_agent_get_adapter(): ddo = DDO.create('http://localhost:3030') agent", "in range(0, 100): name = f'name_{index}' metadata[name] = secrets.token_hex(64) filter = {} for", "import RemoteAgentAdapter from starfish.network.ddo import DDO def test_remote_agent_set_http_client(): ddo = DDO.create('http://localhost:3030') agent =", "} metadata = { 'name': 'test', 'more_data': 'test_data' } assert(RemoteAgent.is_metadata_match(filter, metadata)) metadata =", "metadata[name] = secrets.token_hex(64) filter = {} for index in range(10, secrets.randbelow(60)): name =", "RemoteAgent Unit \"\"\" import pytest import requests import secrets from starfish.agent.remote_agent import RemoteAgent", "RemoteAgentAdapter from starfish.network.ddo import DDO def test_remote_agent_set_http_client(): ddo = DDO.create('http://localhost:3030') agent = RemoteAgent(ddo)", "def test_remote_agent_set_http_client(): ddo = DDO.create('http://localhost:3030') agent = RemoteAgent(ddo) assert(agent.http_client) new_client = object() agent.http_client", "'more_data': 'test_data' } assert(RemoteAgent.is_metadata_match(filter, metadata)) metadata = {} for index in range(0, 100):", "= { 'name': 'test', 'more_data': 'test_data' } assert(RemoteAgent.is_metadata_match(filter, metadata)) metadata = {} for", "requests import secrets from starfish.agent.remote_agent import RemoteAgent from starfish.exceptions import StarfishConnectionError from starfish.middleware.agent.remote_agent_adapter", "assert(isinstance(agent.http_client, object)) def test_remote_agent_get_adapter(): ddo = DDO.create('http://localhost:3030') agent = RemoteAgent(ddo) assert(agent.adapter) assert(isinstance(agent.adapter, RemoteAgentAdapter))", "new_client = object() agent.http_client = new_client assert(agent.http_client) assert(isinstance(agent.http_client, object)) def test_remote_agent_get_adapter(): ddo =", "= DDO.create('http://localhost') agent = RemoteAgent(ddo) with 
pytest.raises(StarfishConnectionError): result = agent.get_metadata_list() def test_remote_agent_is_metadata_match(): filter", "= f'name_{index}' filter[name] = metadata[name] assert(RemoteAgent.is_metadata_match(filter, metadata)) filter['new_value'] = 'cannot match' assert(not RemoteAgent.is_metadata_match(filter,", "StarfishConnectionError from starfish.middleware.agent.remote_agent_adapter import RemoteAgentAdapter from starfish.network.ddo import DDO def test_remote_agent_set_http_client(): ddo =", "{} for index in range(0, 100): name = f'name_{index}' metadata[name] = secrets.token_hex(64) filter", "'test' } metadata = { 'name': 'test', 'more_data': 'test_data' } assert(RemoteAgent.is_metadata_match(filter, metadata)) metadata", "metadata)) metadata = {} for index in range(0, 100): name = f'name_{index}' metadata[name]", "filter = {} for index in range(10, secrets.randbelow(60)): name = f'name_{index}' filter[name] =", "agent = RemoteAgent(ddo) assert(agent.adapter) assert(isinstance(agent.adapter, RemoteAgentAdapter)) def test_remote_agent_get_meta_list(): ddo = DDO.create('http://localhost') agent =", "pytest import requests import secrets from starfish.agent.remote_agent import RemoteAgent from starfish.exceptions import StarfishConnectionError", "RemoteAgentAdapter)) def test_remote_agent_get_meta_list(): ddo = DDO.create('http://localhost') agent = RemoteAgent(ddo) with pytest.raises(StarfishConnectionError): result =", "100): name = f'name_{index}' metadata[name] = secrets.token_hex(64) filter = {} for index in", "import StarfishConnectionError from starfish.middleware.agent.remote_agent_adapter import RemoteAgentAdapter from starfish.network.ddo import DDO def test_remote_agent_set_http_client(): ddo", "starfish.exceptions import StarfishConnectionError from starfish.middleware.agent.remote_agent_adapter import RemoteAgentAdapter from starfish.network.ddo import DDO def test_remote_agent_set_http_client():", "def test_remote_agent_get_meta_list(): ddo 
= DDO.create('http://localhost') agent = RemoteAgent(ddo) with pytest.raises(StarfishConnectionError): result = agent.get_metadata_list()", "DDO.create('http://localhost') agent = RemoteAgent(ddo) with pytest.raises(StarfishConnectionError): result = agent.get_metadata_list() def test_remote_agent_is_metadata_match(): filter =", "DDO.create('http://localhost:3030') agent = RemoteAgent(ddo) assert(agent.adapter) assert(isinstance(agent.adapter, RemoteAgentAdapter)) def test_remote_agent_get_meta_list(): ddo = DDO.create('http://localhost') agent", "test_remote_agent_set_http_client(): ddo = DDO.create('http://localhost:3030') agent = RemoteAgent(ddo) assert(agent.http_client) new_client = object() agent.http_client =", "metadata = { 'name': 'test', 'more_data': 'test_data' } assert(RemoteAgent.is_metadata_match(filter, metadata)) metadata = {}", "import RemoteAgent from starfish.exceptions import StarfishConnectionError from starfish.middleware.agent.remote_agent_adapter import RemoteAgentAdapter from starfish.network.ddo import", "DDO def test_remote_agent_set_http_client(): ddo = DDO.create('http://localhost:3030') agent = RemoteAgent(ddo) assert(agent.http_client) new_client = object()", "'test_data' } assert(RemoteAgent.is_metadata_match(filter, metadata)) metadata = {} for index in range(0, 100): name", "agent.get_metadata_list() def test_remote_agent_is_metadata_match(): filter = { 'name': 'test' } metadata = { 'name':", "import pytest import requests import secrets from starfish.agent.remote_agent import RemoteAgent from starfish.exceptions import", "ddo = DDO.create('http://localhost:3030') agent = RemoteAgent(ddo) assert(agent.http_client) new_client = object() agent.http_client = new_client", "{ 'name': 'test', 'more_data': 'test_data' } assert(RemoteAgent.is_metadata_match(filter, metadata)) metadata = {} for index", "'name': 'test' } metadata = { 'name': 'test', 'more_data': 'test_data' } assert(RemoteAgent.is_metadata_match(filter, metadata))", 
"metadata = {} for index in range(0, 100): name = f'name_{index}' metadata[name] =", "index in range(0, 100): name = f'name_{index}' metadata[name] = secrets.token_hex(64) filter = {}", "result = agent.get_metadata_list() def test_remote_agent_is_metadata_match(): filter = { 'name': 'test' } metadata =", "test_remote_agent_get_meta_list(): ddo = DDO.create('http://localhost') agent = RemoteAgent(ddo) with pytest.raises(StarfishConnectionError): result = agent.get_metadata_list() def", "agent = RemoteAgent(ddo) assert(agent.http_client) new_client = object() agent.http_client = new_client assert(agent.http_client) assert(isinstance(agent.http_client, object))", "from starfish.agent.remote_agent import RemoteAgent from starfish.exceptions import StarfishConnectionError from starfish.middleware.agent.remote_agent_adapter import RemoteAgentAdapter from", "for index in range(0, 100): name = f'name_{index}' metadata[name] = secrets.token_hex(64) filter =", "test_remote_agent_is_metadata_match(): filter = { 'name': 'test' } metadata = { 'name': 'test', 'more_data':", "filter = { 'name': 'test' } metadata = { 'name': 'test', 'more_data': 'test_data'", "= DDO.create('http://localhost:3030') agent = RemoteAgent(ddo) assert(agent.adapter) assert(isinstance(agent.adapter, RemoteAgentAdapter)) def test_remote_agent_get_meta_list(): ddo = DDO.create('http://localhost')", "{} for index in range(10, secrets.randbelow(60)): name = f'name_{index}' filter[name] = metadata[name] assert(RemoteAgent.is_metadata_match(filter,", "name = f'name_{index}' filter[name] = metadata[name] assert(RemoteAgent.is_metadata_match(filter, metadata)) filter['new_value'] = 'cannot match' assert(not", "assert(RemoteAgent.is_metadata_match(filter, metadata)) metadata = {} for index in range(0, 100): name = f'name_{index}'", "RemoteAgent(ddo) assert(agent.http_client) new_client = object() agent.http_client = new_client assert(agent.http_client) assert(isinstance(agent.http_client, object)) def 
test_remote_agent_get_adapter():", "} assert(RemoteAgent.is_metadata_match(filter, metadata)) metadata = {} for index in range(0, 100): name =", "range(0, 100): name = f'name_{index}' metadata[name] = secrets.token_hex(64) filter = {} for index", "import DDO def test_remote_agent_set_http_client(): ddo = DDO.create('http://localhost:3030') agent = RemoteAgent(ddo) assert(agent.http_client) new_client =", "from starfish.middleware.agent.remote_agent_adapter import RemoteAgentAdapter from starfish.network.ddo import DDO def test_remote_agent_set_http_client(): ddo = DDO.create('http://localhost:3030')", "'test', 'more_data': 'test_data' } assert(RemoteAgent.is_metadata_match(filter, metadata)) metadata = {} for index in range(0,", "= RemoteAgent(ddo) assert(agent.http_client) new_client = object() agent.http_client = new_client assert(agent.http_client) assert(isinstance(agent.http_client, object)) def", "agent.http_client = new_client assert(agent.http_client) assert(isinstance(agent.http_client, object)) def test_remote_agent_get_adapter(): ddo = DDO.create('http://localhost:3030') agent =", "= object() agent.http_client = new_client assert(agent.http_client) assert(isinstance(agent.http_client, object)) def test_remote_agent_get_adapter(): ddo = DDO.create('http://localhost:3030')", "= { 'name': 'test' } metadata = { 'name': 'test', 'more_data': 'test_data' }", "= RemoteAgent(ddo) assert(agent.adapter) assert(isinstance(agent.adapter, RemoteAgentAdapter)) def test_remote_agent_get_meta_list(): ddo = DDO.create('http://localhost') agent = RemoteAgent(ddo)", "\"\"\" Test RemoteAgent Unit \"\"\" import pytest import requests import secrets from starfish.agent.remote_agent", "assert(agent.adapter) assert(isinstance(agent.adapter, RemoteAgentAdapter)) def test_remote_agent_get_meta_list(): ddo = DDO.create('http://localhost') agent = RemoteAgent(ddo) with pytest.raises(StarfishConnectionError):" ]
[ "signatures_nw=signatures_nw, subclip=(5*60,-10*60)) # cut 5-10 minutes to avoid credits. # Get the signatures", "means that the algorithm will iteratively diversify the frames # used until the", "load the image to reconstruct. This could be any image but out of", "the # same dimensions as the frames that will compose it. # We", "# same dimensions as the frames that will compose it. # We take", "will iteratively diversify the frames # used until the most used frames is", "it. # We take the scene just before \"My name is Maximus...\". import", "Extract one special frame (the one to be reconstructed) from the movie. 3.", "signature of the region/frame. signatures_nh=3 signatures_nw=3 ### STEP 1 - EXTRACTING THE FRAMES", "tells the algorithm to give up after 3000 iterations if it # cannot", "THE MOVIE # For this example we treat gladiator. The result is this", "frames of the movie to the regions of the picture to # reconstruct,", "and each region will be reduced to Nh x Nw # zones from", "different frames are used. 5. Assemble the selected best-matching frames into one big", "9 colors is called the signature of the region/frame. signatures_nh=3 signatures_nw=3 ### STEP", "example we treat gladiator. The result is this mosaic # http://i.imgur.com/Eoglcof.jpg foldername =", "PNG (50Mo) which can then be # downsized by converting it to JPEG.", "1100+ frames of the movie. http://i.imgur.com/Eoglcof.jpg This script goes in five steps: 1.", "4. Run an algorithm to find (using the signatures) wich frames of the", "= get_image_signatures_from_folder(foldername) ### STEP 2 - READING THE IMAGE TO BE RECONSTRUCTED #", "frame of the movie, so that it will have the # same dimensions", "of the images. # The algorithm first attributes to each region of the", "- FIND THE BEST-MATCHING FRAMES. OPTIMIZE. # This step is quite quick because", "mean colors are computed. Here we choose 3 x 3. # The resulting", "steps: 1. Extract one frame every 5 second of the movie. 
Compute their", "package Pompei. It generates this picture of general Maximus in Gladiator using 1100+", "of 5. Choosing a lower npasses (like npasses=100) can be # good sometimes", "that matches best. Some frames will be used more than once. # Then,", "legally-baught DVD # The next call extracts the frames from the movie. At", "picture and save. The code is well commented to paliate for the lack", "so that it will have the # same dimensions as the frames that", "### STEP 1 - EXTRACTING THE FRAMES OF THE MOVIE # For this", "minutes to avoid credits. # Get the signatures of each frame, already computed", "movie. http://i.imgur.com/Eoglcof.jpg This script goes in five steps: 1. Extract one frame every", "frame frame of the movie, so that it will have the # same", "of 9 colors is called the signature of the region/frame. signatures_nh=3 signatures_nw=3 ###", "foldername, fps=1.0/5, # take one frame every 5 seconds resize_factor=0.2, # downsize all", "= compute_signatures_in_image(image, signatures_nh, signatures_nw, nh, nw) ### STEP 4 - FIND THE BEST-MATCHING", "this mosaic # http://i.imgur.com/Eoglcof.jpg foldername = \"gladiator\" # name of the folder for", "# We take the scene just before \"My name is Maximus...\". import moviepy.editor", "frames of the movie match best with the different regions of the picture", "the lack of documentation. For more, see the functions doctrings. \"\"\" from pompei", "# cannot reach its goal of 5. Choosing a lower npasses (like npasses=100)", "its goal of 5. Choosing a lower npasses (like npasses=100) can be #", "to reconstruct one frame of a movie using a mosaic of other frames", "frame (the one to be reconstructed) from the movie. 3. Split this frame", "signatures) wich frames of the movie match best with the different regions of", "used more than once. # Then, goal=5 means that the algorithm will iteratively", "5. 
Choosing a lower npasses (like npasses=100) can be # good sometimes to", "FRAMES INTO ONE BIG PNG FILE # This produces the final picture: gladiator.png", "every 5 seconds resize_factor=0.2, # downsize all frames of a factor 1/5 signatures_nh=signatures_nh,", "step. image_folder_signatures = get_image_signatures_from_folder(foldername) ### STEP 2 - READING THE IMAGE TO BE", "each region will be reduced to Nh x Nw # zones from which", "the frames from the movie. At the same time it computes # the", "Python package Pompei. It generates this picture of general Maximus in Gladiator using", "it out if you want to fine-tune the parameters in the next lines.", "fine-tune the parameters in the next lines. image_folder_signatures = movie_to_folder(filename, foldername, fps=1.0/5, #", "and compute the signature of each region. 4. Run an algorithm to find", "1 - EXTRACTING THE FRAMES OF THE MOVIE # For this example we", "which the mean colors are computed. Here we choose 3 x 3. #", "take long and produce a heavy PNG (50Mo) which can then be #", "best_matches = find_best_matches(image_signatures, image_folder_signatures, npasses=3000,goal=5) ### STEP 5 - ASSEMBLE THE FRAMES INTO", "# http://i.imgur.com/Eoglcof.jpg foldername = \"gladiator\" # name of the folder for the frame", "one special frame (the one to be reconstructed) from the movie. 3. Split", "the regions of the picture to # reconstruct, each frame and each region", "best_matches_to_image) # When comparing the frames of the movie to the regions of", "a heavy PNG (50Mo) which can then be # downsized by converting it", "name is Maximus...\". import moviepy.editor as mpy image = mpy.VideoFileClip(filename).get_frame('01:26:43.00') # a numpy", "the signature of each region. 4. 
Run an algorithm to find (using the", "choose one frame frame of the movie, so that it will have the", "of the picture to # reconstruct, each frame and each region will be", "the video file, from a legally-baught DVD # The next call extracts the", "= movie_to_folder(filename, foldername, fps=1.0/5, # take one frame every 5 seconds resize_factor=0.2, #", "avoid over-diversification. best_matches = find_best_matches(image_signatures, image_folder_signatures, npasses=3000,goal=5) ### STEP 5 - ASSEMBLE THE", "THE REGIONS nh = nw = 60 image_signatures = compute_signatures_in_image(image, signatures_nh, signatures_nw, nh,", "The next call extracts the frames from the movie. At the same time", "TO BE RECONSTRUCTED # Now we load the image to reconstruct. This could", "video file, from a legally-baught DVD # The next call extracts the frames", "extracts the frames from the movie. At the same time it computes #", "a typical script to reconstruct one frame of a movie using a mosaic", "THE BEST-MATCHING FRAMES. OPTIMIZE. # This step is quite quick because we work", "iteratively diversify the frames # used until the most used frames is used", "FILE # This produces the final picture: gladiator.png # This will take long", "best. Some frames will be used more than once. # Then, goal=5 means", "frames and store them in file gladiator/signatures.txt # It's pretty long (5 minutes)", "best with the different regions of the picture to reconstruct. The algorithm also", "x Nw # zones from which the mean colors are computed. Here we", "a movie using a mosaic of other frames with the Python package Pompei.", "one to be reconstructed) from the movie. 3. Split this frame into subregions", "frame into subregions and compute the signature of each region. 4. Run an", "ONE BIG PNG FILE # This produces the final picture: gladiator.png # This", "### STEP 4 - FIND THE BEST-MATCHING FRAMES. OPTIMIZE. 
# This step is", "of the folder for the frame pictures filename = 'gladiator.flv' # the video", "of general Maximus in Gladiator using 1100+ frames of the movie. http://i.imgur.com/Eoglcof.jpg This", "the movie. http://i.imgur.com/Eoglcof.jpg This script goes in five steps: 1. Extract one frame", "to the regions of the picture to # reconstruct, each frame and each", "# Then, goal=5 means that the algorithm will iteratively diversify the frames #", "The result is this mosaic # http://i.imgur.com/Eoglcof.jpg foldername = \"gladiator\" # name of", "version of the images. # The algorithm first attributes to each region of", "pictures filename = 'gladiator.flv' # the video file, from a legally-baught DVD #", "computed at the previous step. image_folder_signatures = get_image_signatures_from_folder(foldername) ### STEP 2 - READING", "to each region of the final picture the movie # frame that matches", "image but out of # simplicity we choose one frame frame of the", "using 1100+ frames of the movie. http://i.imgur.com/Eoglcof.jpg This script goes in five steps:", "frames of the movie. http://i.imgur.com/Eoglcof.jpg This script goes in five steps: 1. Extract", "# the video file, from a legally-baught DVD # The next call extracts", "This could be any image but out of # simplicity we choose one", "to reconstruct. The algorithm also ensures that many different frames are used. 5.", "(5 minutes) and should only be done once, then you can # comment", "1. Extract one frame every 5 second of the movie. Compute their 'signatures'", "http://i.imgur.com/Eoglcof.jpg This script goes in five steps: 1. Extract one frame every 5", "selected best-matching frames into one big picture and save. The code is well", "OPTIMIZE. # This step is quite quick because we work with signatures (i.e.", "lines. 
image_folder_signatures = movie_to_folder(filename, foldername, fps=1.0/5, # take one frame every 5 seconds", "# npasses=3000 tells the algorithm to give up after 3000 iterations if it", "more, see the functions doctrings. \"\"\" from pompei import (movie_to_folder, get_image_signatures_from_folder, compute_signatures_in_image, find_best_matches,", "picture to # reconstruct, each frame and each region will be reduced to", "moviepy.editor as mpy image = mpy.VideoFileClip(filename).get_frame('01:26:43.00') # a numpy array. ### STEP 3", "zones from which the mean colors are computed. Here we choose 3 x", "algorithm first attributes to each region of the final picture the movie #", "3. # The resulting set of 9 colors is called the signature of", "the movie. Compute their 'signatures' 2. Extract one special frame (the one to", "until the most used frames is used 5 times or less. # npasses=3000", "the final picture: gladiator.png # This will take long and produce a heavy", "for the lack of documentation. For more, see the functions doctrings. \"\"\" from", "of each frame, already computed at the previous step. image_folder_signatures = get_image_signatures_from_folder(foldername) ###", "long (5 minutes) and should only be done once, then you can #", "the movie match best with the different regions of the picture to reconstruct.", "The algorithm first attributes to each region of the final picture the movie", "FRAMES OF THE MOVIE # For this example we treat gladiator. The result", "# The resulting set of 9 colors is called the signature of the", "\"gladiator\" # name of the folder for the frame pictures filename = 'gladiator.flv'", "out if you want to fine-tune the parameters in the next lines. image_folder_signatures", "as the frames that will compose it. # We take the scene just", "lack of documentation. For more, see the functions doctrings. \"\"\" from pompei import", "region of the final picture the movie # frame that matches best. Some", "cannot reach its goal of 5. 
Choosing a lower npasses (like npasses=100) can", "the different regions of the picture to reconstruct. The algorithm also ensures that", "same time it computes # the signatures of the frames and store them", "3000 iterations if it # cannot reach its goal of 5. Choosing a", "# zones from which the mean colors are computed. Here we choose 3", "frames from the movie. At the same time it computes # the signatures", "iterations if it # cannot reach its goal of 5. Choosing a lower", "PNG FILE # This produces the final picture: gladiator.png # This will take", "to Nh x Nw # zones from which the mean colors are computed.", "signature of each region. 4. Run an algorithm to find (using the signatures)", "5 times or less. # npasses=3000 tells the algorithm to give up after", "just before \"My name is Maximus...\". import moviepy.editor as mpy image = mpy.VideoFileClip(filename).get_frame('01:26:43.00')", "2. Extract one special frame (the one to be reconstructed) from the movie.", "cut 5-10 minutes to avoid credits. # Get the signatures of each frame,", "of the movie, so that it will have the # same dimensions as", "# When comparing the frames of the movie to the regions of the", "each frame and each region will be reduced to Nh x Nw #", "name of the folder for the frame pictures filename = 'gladiator.flv' # the", "the scene just before \"My name is Maximus...\". import moviepy.editor as mpy image", "quite quick because we work with signatures (i.e. reduced # version of the", "of the frames and store them in file gladiator/signatures.txt # It's pretty long", "signatures of the frames and store them in file gladiator/signatures.txt # It's pretty", "parameters in the next lines. 
image_folder_signatures = movie_to_folder(filename, foldername, fps=1.0/5, # take one", "downsize all frames of a factor 1/5 signatures_nh=signatures_nh, signatures_nw=signatures_nw, subclip=(5*60,-10*60)) # cut 5-10", "mosaic # http://i.imgur.com/Eoglcof.jpg foldername = \"gladiator\" # name of the folder for the", "the movie # frame that matches best. Some frames will be used more", "choose 3 x 3. # The resulting set of 9 colors is called", "one big picture and save. The code is well commented to paliate for", "of the movie to the regions of the picture to # reconstruct, each", "Compute their 'signatures' 2. Extract one special frame (the one to be reconstructed)", "to avoid credits. # Get the signatures of each frame, already computed at", "from the movie. At the same time it computes # the signatures of", "the frames and store them in file gladiator/signatures.txt # It's pretty long (5", "of the picture to reconstruct. The algorithm also ensures that many different frames", "be used more than once. # Then, goal=5 means that the algorithm will", "SIGNATURES OF THE REGIONS nh = nw = 60 image_signatures = compute_signatures_in_image(image, signatures_nh,", "of documentation. For more, see the functions doctrings. \"\"\" from pompei import (movie_to_folder,", "The resulting set of 9 colors is called the signature of the region/frame.", "subregions and compute the signature of each region. 4. Run an algorithm to", "of other frames with the Python package Pompei. It generates this picture of", "matches best. Some frames will be used more than once. # Then, goal=5", "as mpy image = mpy.VideoFileClip(filename).get_frame('01:26:43.00') # a numpy array. ### STEP 3 -", "of the final picture the movie # frame that matches best. Some frames", "other frames with the Python package Pompei. It generates this picture of general", "region/frame. 
signatures_nh=3 signatures_nw=3 ### STEP 1 - EXTRACTING THE FRAMES OF THE MOVIE", "using a mosaic of other frames with the Python package Pompei. It generates", "but out of # simplicity we choose one frame frame of the movie,", "Maximus...\". import moviepy.editor as mpy image = mpy.VideoFileClip(filename).get_frame('01:26:43.00') # a numpy array. ###", "or less. # npasses=3000 tells the algorithm to give up after 3000 iterations", "big picture and save. The code is well commented to paliate for the", "gladiator. The result is this mosaic # http://i.imgur.com/Eoglcof.jpg foldername = \"gladiator\" # name", "frame and each region will be reduced to Nh x Nw # zones", "IMAGE AND COMPUTE THE SIGNATURES OF THE REGIONS nh = nw = 60", "heavy PNG (50Mo) which can then be # downsized by converting it to", "reduced to Nh x Nw # zones from which the mean colors are", "get_image_signatures_from_folder(foldername) ### STEP 2 - READING THE IMAGE TO BE RECONSTRUCTED # Now", "frames will be used more than once. # Then, goal=5 means that the", "be done once, then you can # comment it out if you want", "# This step is quite quick because we work with signatures (i.e. reduced", "READING THE IMAGE TO BE RECONSTRUCTED # Now we load the image to", "step is quite quick because we work with signatures (i.e. reduced # version", "\"My name is Maximus...\". import moviepy.editor as mpy image = mpy.VideoFileClip(filename).get_frame('01:26:43.00') # a", "a numpy array. ### STEP 3 - SPLIT THE IMAGE AND COMPUTE THE", "THE FRAMES INTO ONE BIG PNG FILE # This produces the final picture:", "image = mpy.VideoFileClip(filename).get_frame('01:26:43.00') # a numpy array. ### STEP 3 - SPLIT THE", "each region of the final picture the movie # frame that matches best.", "(movie_to_folder, get_image_signatures_from_folder, compute_signatures_in_image, find_best_matches, best_matches_to_image) # When comparing the frames of the movie", "signatures (i.e. reduced # version of the images. 
# The algorithm first attributes", "\"\"\" This is a typical script to reconstruct one frame of a movie", "algorithm also ensures that many different frames are used. 5. Assemble the selected", "resize_factor=0.2, # downsize all frames of a factor 1/5 signatures_nh=signatures_nh, signatures_nw=signatures_nw, subclip=(5*60,-10*60)) #", "Pompei. It generates this picture of general Maximus in Gladiator using 1100+ frames", "FRAMES. OPTIMIZE. # This step is quite quick because we work with signatures", "AND COMPUTE THE SIGNATURES OF THE REGIONS nh = nw = 60 image_signatures", "script goes in five steps: 1. Extract one frame every 5 second of", "the movie, so that it will have the # same dimensions as the", "factor 1/5 signatures_nh=signatures_nh, signatures_nw=signatures_nw, subclip=(5*60,-10*60)) # cut 5-10 minutes to avoid credits. #", "### STEP 3 - SPLIT THE IMAGE AND COMPUTE THE SIGNATURES OF THE", "the region/frame. signatures_nh=3 signatures_nw=3 ### STEP 1 - EXTRACTING THE FRAMES OF THE", "save. The code is well commented to paliate for the lack of documentation.", "= mpy.VideoFileClip(filename).get_frame('01:26:43.00') # a numpy array. ### STEP 3 - SPLIT THE IMAGE", "STEP 4 - FIND THE BEST-MATCHING FRAMES. OPTIMIZE. # This step is quite", "When comparing the frames of the movie to the regions of the picture", "pretty long (5 minutes) and should only be done once, then you can", "see the functions doctrings. \"\"\" from pompei import (movie_to_folder, get_image_signatures_from_folder, compute_signatures_in_image, find_best_matches, best_matches_to_image)", "Now we load the image to reconstruct. This could be any image but", "find_best_matches(image_signatures, image_folder_signatures, npasses=3000,goal=5) ### STEP 5 - ASSEMBLE THE FRAMES INTO ONE BIG", "# good sometimes to avoid over-diversification. 
best_matches = find_best_matches(image_signatures, image_folder_signatures, npasses=3000,goal=5) ### STEP", "done once, then you can # comment it out if you want to", "import moviepy.editor as mpy image = mpy.VideoFileClip(filename).get_frame('01:26:43.00') # a numpy array. ### STEP", "a mosaic of other frames with the Python package Pompei. It generates this", "the frames of the movie to the regions of the picture to #", "the signature of the region/frame. signatures_nh=3 signatures_nw=3 ### STEP 1 - EXTRACTING THE", "the algorithm to give up after 3000 iterations if it # cannot reach", "npasses=100) can be # good sometimes to avoid over-diversification. best_matches = find_best_matches(image_signatures, image_folder_signatures,", "comparing the frames of the movie to the regions of the picture to", "out of # simplicity we choose one frame frame of the movie, so", "that it will have the # same dimensions as the frames that will", "call extracts the frames from the movie. At the same time it computes", "could be any image but out of # simplicity we choose one frame", "used. 5. Assemble the selected best-matching frames into one big picture and save.", "to fine-tune the parameters in the next lines. image_folder_signatures = movie_to_folder(filename, foldername, fps=1.0/5,", "signatures of each frame, already computed at the previous step. image_folder_signatures = get_image_signatures_from_folder(foldername)", "the folder for the frame pictures filename = 'gladiator.flv' # the video file,", "it will have the # same dimensions as the frames that will compose", "can be # good sometimes to avoid over-diversification. best_matches = find_best_matches(image_signatures, image_folder_signatures, npasses=3000,goal=5)", "of the movie. Compute their 'signatures' 2. 
Extract one special frame (the one", "we choose one frame frame of the movie, so that it will have", "Run an algorithm to find (using the signatures) wich frames of the movie", "folder for the frame pictures filename = 'gladiator.flv' # the video file, from", "We take the scene just before \"My name is Maximus...\". import moviepy.editor as", "goal=5 means that the algorithm will iteratively diversify the frames # used until", "mpy image = mpy.VideoFileClip(filename).get_frame('01:26:43.00') # a numpy array. ### STEP 3 - SPLIT", "pompei import (movie_to_folder, get_image_signatures_from_folder, compute_signatures_in_image, find_best_matches, best_matches_to_image) # When comparing the frames of", "from a legally-baught DVD # The next call extracts the frames from the", "five steps: 1. Extract one frame every 5 second of the movie. Compute", "produces the final picture: gladiator.png # This will take long and produce a", "take the scene just before \"My name is Maximus...\". import moviepy.editor as mpy", "the parameters in the next lines. image_folder_signatures = movie_to_folder(filename, foldername, fps=1.0/5, # take", "nh = nw = 60 image_signatures = compute_signatures_in_image(image, signatures_nh, signatures_nw, nh, nw) ###", "THE SIGNATURES OF THE REGIONS nh = nw = 60 image_signatures = compute_signatures_in_image(image,", "the previous step. image_folder_signatures = get_image_signatures_from_folder(foldername) ### STEP 2 - READING THE IMAGE", "- SPLIT THE IMAGE AND COMPUTE THE SIGNATURES OF THE REGIONS nh =", "# a numpy array. ### STEP 3 - SPLIT THE IMAGE AND COMPUTE", "into subregions and compute the signature of each region. 4. Run an algorithm", "will have the # same dimensions as the frames that will compose it.", "EXTRACTING THE FRAMES OF THE MOVIE # For this example we treat gladiator.", "work with signatures (i.e. reduced # version of the images. # The algorithm", "(the one to be reconstructed) from the movie. 3. 
Split this frame into", "reach its goal of 5. Choosing a lower npasses (like npasses=100) can be", "be any image but out of # simplicity we choose one frame frame", "special frame (the one to be reconstructed) from the movie. 3. Split this", "goal of 5. Choosing a lower npasses (like npasses=100) can be # good", "for the frame pictures filename = 'gladiator.flv' # the video file, from a", "and produce a heavy PNG (50Mo) which can then be # downsized by", "gladiator/signatures.txt # It's pretty long (5 minutes) and should only be done once,", "previous step. image_folder_signatures = get_image_signatures_from_folder(foldername) ### STEP 2 - READING THE IMAGE TO", "all frames of a factor 1/5 signatures_nh=signatures_nh, signatures_nw=signatures_nw, subclip=(5*60,-10*60)) # cut 5-10 minutes", "It generates this picture of general Maximus in Gladiator using 1100+ frames of", "5 second of the movie. Compute their 'signatures' 2. Extract one special frame", "from the movie. 3. Split this frame into subregions and compute the signature", "# downsize all frames of a factor 1/5 signatures_nh=signatures_nh, signatures_nw=signatures_nw, subclip=(5*60,-10*60)) # cut", "nw) ### STEP 4 - FIND THE BEST-MATCHING FRAMES. OPTIMIZE. # This step", "frame that matches best. Some frames will be used more than once. #", "3 - SPLIT THE IMAGE AND COMPUTE THE SIGNATURES OF THE REGIONS nh", "picture the movie # frame that matches best. Some frames will be used", "image to reconstruct. This could be any image but out of # simplicity", "next lines. image_folder_signatures = movie_to_folder(filename, foldername, fps=1.0/5, # take one frame every 5", "compose it. # We take the scene just before \"My name is Maximus...\".", "Then, goal=5 means that the algorithm will iteratively diversify the frames # used", "signatures_nh=signatures_nh, signatures_nw=signatures_nw, subclip=(5*60,-10*60)) # cut 5-10 minutes to avoid credits. # Get the", "from which the mean colors are computed. 
Here we choose 3 x 3.", "fps=1.0/5, # take one frame every 5 seconds resize_factor=0.2, # downsize all frames", "(i.e. reduced # version of the images. # The algorithm first attributes to", "you can # comment it out if you want to fine-tune the parameters", "Nw # zones from which the mean colors are computed. Here we choose", "5-10 minutes to avoid credits. # Get the signatures of each frame, already", "movie using a mosaic of other frames with the Python package Pompei. It", "of a factor 1/5 signatures_nh=signatures_nh, signatures_nw=signatures_nw, subclip=(5*60,-10*60)) # cut 5-10 minutes to avoid", "# For this example we treat gladiator. The result is this mosaic #", "we treat gladiator. The result is this mosaic # http://i.imgur.com/Eoglcof.jpg foldername = \"gladiator\"", "before \"My name is Maximus...\". import moviepy.editor as mpy image = mpy.VideoFileClip(filename).get_frame('01:26:43.00') #", "in five steps: 1. Extract one frame every 5 second of the movie.", "them in file gladiator/signatures.txt # It's pretty long (5 minutes) and should only", "want to fine-tune the parameters in the next lines. image_folder_signatures = movie_to_folder(filename, foldername,", "a factor 1/5 signatures_nh=signatures_nh, signatures_nw=signatures_nw, subclip=(5*60,-10*60)) # cut 5-10 minutes to avoid credits.", "# the signatures of the frames and store them in file gladiator/signatures.txt #", "after 3000 iterations if it # cannot reach its goal of 5. Choosing", "to find (using the signatures) wich frames of the movie match best with", "any image but out of # simplicity we choose one frame frame of", "movie. Compute their 'signatures' 2. Extract one special frame (the one to be", "OF THE MOVIE # For this example we treat gladiator. The result is", "# frame that matches best. Some frames will be used more than once.", "BIG PNG FILE # This produces the final picture: gladiator.png # This will", "most used frames is used 5 times or less. 
# npasses=3000 tells the", "mpy.VideoFileClip(filename).get_frame('01:26:43.00') # a numpy array. ### STEP 3 - SPLIT THE IMAGE AND", "REGIONS nh = nw = 60 image_signatures = compute_signatures_in_image(image, signatures_nh, signatures_nw, nh, nw)", "long and produce a heavy PNG (50Mo) which can then be # downsized", "to give up after 3000 iterations if it # cannot reach its goal", "colors is called the signature of the region/frame. signatures_nh=3 signatures_nw=3 ### STEP 1", "THE IMAGE TO BE RECONSTRUCTED # Now we load the image to reconstruct.", "frame, already computed at the previous step. image_folder_signatures = get_image_signatures_from_folder(foldername) ### STEP 2", "# The algorithm first attributes to each region of the final picture the", "is this mosaic # http://i.imgur.com/Eoglcof.jpg foldername = \"gladiator\" # name of the folder", "algorithm to find (using the signatures) wich frames of the movie match best", "= find_best_matches(image_signatures, image_folder_signatures, npasses=3000,goal=5) ### STEP 5 - ASSEMBLE THE FRAMES INTO ONE", "it computes # the signatures of the frames and store them in file", "up after 3000 iterations if it # cannot reach its goal of 5.", "sometimes to avoid over-diversification. best_matches = find_best_matches(image_signatures, image_folder_signatures, npasses=3000,goal=5) ### STEP 5 -", "FIND THE BEST-MATCHING FRAMES. OPTIMIZE. # This step is quite quick because we", "For more, see the functions doctrings. \"\"\" from pompei import (movie_to_folder, get_image_signatures_from_folder, compute_signatures_in_image,", "and should only be done once, then you can # comment it out", "one frame every 5 second of the movie. Compute their 'signatures' 2. Extract", "to reconstruct. This could be any image but out of # simplicity we", "ASSEMBLE THE FRAMES INTO ONE BIG PNG FILE # This produces the final", "Assemble the selected best-matching frames into one big picture and save. 
The code", "next call extracts the frames from the movie. At the same time it", "= 60 image_signatures = compute_signatures_in_image(image, signatures_nh, signatures_nw, nh, nw) ### STEP 4 -", "the signatures) wich frames of the movie match best with the different regions", "frame pictures filename = 'gladiator.flv' # the video file, from a legally-baught DVD", "STEP 3 - SPLIT THE IMAGE AND COMPUTE THE SIGNATURES OF THE REGIONS", "a legally-baught DVD # The next call extracts the frames from the movie.", "the Python package Pompei. It generates this picture of general Maximus in Gladiator", "BEST-MATCHING FRAMES. OPTIMIZE. # This step is quite quick because we work with", "called the signature of the region/frame. signatures_nh=3 signatures_nw=3 ### STEP 1 - EXTRACTING", "generates this picture of general Maximus in Gladiator using 1100+ frames of the", "code is well commented to paliate for the lack of documentation. For more,", "scene just before \"My name is Maximus...\". import moviepy.editor as mpy image =", "frames with the Python package Pompei. It generates this picture of general Maximus", "used until the most used frames is used 5 times or less. #", "### STEP 5 - ASSEMBLE THE FRAMES INTO ONE BIG PNG FILE #", "1/5 signatures_nh=signatures_nh, signatures_nw=signatures_nw, subclip=(5*60,-10*60)) # cut 5-10 minutes to avoid credits. # Get", "should only be done once, then you can # comment it out if", "frames into one big picture and save. The code is well commented to", "of a movie using a mosaic of other frames with the Python package", "movie, so that it will have the # same dimensions as the frames", "STEP 5 - ASSEMBLE THE FRAMES INTO ONE BIG PNG FILE # This", "with the different regions of the picture to reconstruct. 
The algorithm also ensures", "OF THE REGIONS nh = nw = 60 image_signatures = compute_signatures_in_image(image, signatures_nh, signatures_nw,", "2 - READING THE IMAGE TO BE RECONSTRUCTED # Now we load the", "signatures_nh, signatures_nw, nh, nw) ### STEP 4 - FIND THE BEST-MATCHING FRAMES. OPTIMIZE.", "good sometimes to avoid over-diversification. best_matches = find_best_matches(image_signatures, image_folder_signatures, npasses=3000,goal=5) ### STEP 5", "is a typical script to reconstruct one frame of a movie using a", "picture of general Maximus in Gladiator using 1100+ frames of the movie. http://i.imgur.com/Eoglcof.jpg", "regions of the picture to reconstruct. The algorithm also ensures that many different", "the movie. At the same time it computes # the signatures of the", "if you want to fine-tune the parameters in the next lines. image_folder_signatures =", "already computed at the previous step. image_folder_signatures = get_image_signatures_from_folder(foldername) ### STEP 2 -", "give up after 3000 iterations if it # cannot reach its goal of", "picture to reconstruct. The algorithm also ensures that many different frames are used.", "credits. # Get the signatures of each frame, already computed at the previous", "is quite quick because we work with signatures (i.e. reduced # version of", "will be used more than once. # Then, goal=5 means that the algorithm", "subclip=(5*60,-10*60)) # cut 5-10 minutes to avoid credits. # Get the signatures of", "3. Split this frame into subregions and compute the signature of each region.", "store them in file gladiator/signatures.txt # It's pretty long (5 minutes) and should", "will be reduced to Nh x Nw # zones from which the mean", "comment it out if you want to fine-tune the parameters in the next", "one frame every 5 seconds resize_factor=0.2, # downsize all frames of a factor", "every 5 second of the movie. Compute their 'signatures' 2. 
Extract one special", "THE FRAMES OF THE MOVIE # For this example we treat gladiator. The", "doctrings. \"\"\" from pompei import (movie_to_folder, get_image_signatures_from_folder, compute_signatures_in_image, find_best_matches, best_matches_to_image) # When comparing", "will compose it. # We take the scene just before \"My name is", "frame every 5 second of the movie. Compute their 'signatures' 2. Extract one", "'gladiator.flv' # the video file, from a legally-baught DVD # The next call", "of each region. 4. Run an algorithm to find (using the signatures) wich", "minutes) and should only be done once, then you can # comment it", "we work with signatures (i.e. reduced # version of the images. # The", "their 'signatures' 2. Extract one special frame (the one to be reconstructed) from", "than once. # Then, goal=5 means that the algorithm will iteratively diversify the", "# The next call extracts the frames from the movie. At the same", "compute_signatures_in_image, find_best_matches, best_matches_to_image) # When comparing the frames of the movie to the", "### STEP 2 - READING THE IMAGE TO BE RECONSTRUCTED # Now we", "to paliate for the lack of documentation. For more, see the functions doctrings.", "same dimensions as the frames that will compose it. # We take the", "can # comment it out if you want to fine-tune the parameters in", "get_image_signatures_from_folder, compute_signatures_in_image, find_best_matches, best_matches_to_image) # When comparing the frames of the movie to", "can then be # downsized by converting it to JPEG. best_matches_to_image(\"%s.png\"%foldername, best_matches, foldername)", "have the # same dimensions as the frames that will compose it. #", "image_folder_signatures = movie_to_folder(filename, foldername, fps=1.0/5, # take one frame every 5 seconds resize_factor=0.2,", "nw = 60 image_signatures = compute_signatures_in_image(image, signatures_nh, signatures_nw, nh, nw) ### STEP 4", "and save. 
The code is well commented to paliate for the lack of", "the most used frames is used 5 times or less. # npasses=3000 tells", "once, then you can # comment it out if you want to fine-tune", "in the next lines. image_folder_signatures = movie_to_folder(filename, foldername, fps=1.0/5, # take one frame", "reconstruct. The algorithm also ensures that many different frames are used. 5. Assemble", "reconstruct, each frame and each region will be reduced to Nh x Nw", "is well commented to paliate for the lack of documentation. For more, see", "an algorithm to find (using the signatures) wich frames of the movie match", "The code is well commented to paliate for the lack of documentation. For", "general Maximus in Gladiator using 1100+ frames of the movie. http://i.imgur.com/Eoglcof.jpg This script", "be reconstructed) from the movie. 3. Split this frame into subregions and compute", "This will take long and produce a heavy PNG (50Mo) which can then", "avoid credits. # Get the signatures of each frame, already computed at the", "with signatures (i.e. reduced # version of the images. # The algorithm first", "more than once. # Then, goal=5 means that the algorithm will iteratively diversify", "the final picture the movie # frame that matches best. Some frames will", "BE RECONSTRUCTED # Now we load the image to reconstruct. This could be", "reconstruct one frame of a movie using a mosaic of other frames with", "is Maximus...\". import moviepy.editor as mpy image = mpy.VideoFileClip(filename).get_frame('01:26:43.00') # a numpy array.", "- READING THE IMAGE TO BE RECONSTRUCTED # Now we load the image", "find (using the signatures) wich frames of the movie match best with the", "# simplicity we choose one frame frame of the movie, so that it", "it # cannot reach its goal of 5. Choosing a lower npasses (like", "# This will take long and produce a heavy PNG (50Mo) which can", "is used 5 times or less. 
# npasses=3000 tells the algorithm to give", "<reponame>Zulko/pompei \"\"\" This is a typical script to reconstruct one frame of a", "frames that will compose it. # We take the scene just before \"My", "we choose 3 x 3. # The resulting set of 9 colors is", "mosaic of other frames with the Python package Pompei. It generates this picture", "the functions doctrings. \"\"\" from pompei import (movie_to_folder, get_image_signatures_from_folder, compute_signatures_in_image, find_best_matches, best_matches_to_image) #", "images. # The algorithm first attributes to each region of the final picture", "best-matching frames into one big picture and save. The code is well commented", "once. # Then, goal=5 means that the algorithm will iteratively diversify the frames", "image_folder_signatures = get_image_signatures_from_folder(foldername) ### STEP 2 - READING THE IMAGE TO BE RECONSTRUCTED", "this example we treat gladiator. The result is this mosaic # http://i.imgur.com/Eoglcof.jpg foldername", "npasses=3000 tells the algorithm to give up after 3000 iterations if it #", "(using the signatures) wich frames of the movie match best with the different", "(like npasses=100) can be # good sometimes to avoid over-diversification. best_matches = find_best_matches(image_signatures,", "Get the signatures of each frame, already computed at the previous step. image_folder_signatures", "# comment it out if you want to fine-tune the parameters in the", "the movie. 3. Split this frame into subregions and compute the signature of", "the next lines. image_folder_signatures = movie_to_folder(filename, foldername, fps=1.0/5, # take one frame every", "match best with the different regions of the picture to reconstruct. The algorithm", "array. ### STEP 3 - SPLIT THE IMAGE AND COMPUTE THE SIGNATURES OF", "that will compose it. # We take the scene just before \"My name", "It's pretty long (5 minutes) and should only be done once, then you", "if it # cannot reach its goal of 5. 
Choosing a lower npasses", "\"\"\" from pompei import (movie_to_folder, get_image_signatures_from_folder, compute_signatures_in_image, find_best_matches, best_matches_to_image) # When comparing the", "# Get the signatures of each frame, already computed at the previous step.", "filename = 'gladiator.flv' # the video file, from a legally-baught DVD # The", "resulting set of 9 colors is called the signature of the region/frame. signatures_nh=3", "x 3. # The resulting set of 9 colors is called the signature", "the image to reconstruct. This could be any image but out of #", "times or less. # npasses=3000 tells the algorithm to give up after 3000", "movie # frame that matches best. Some frames will be used more than", "one frame of a movie using a mosaic of other frames with the", "each region. 4. Run an algorithm to find (using the signatures) wich frames", "movie_to_folder(filename, foldername, fps=1.0/5, # take one frame every 5 seconds resize_factor=0.2, # downsize", "a lower npasses (like npasses=100) can be # good sometimes to avoid over-diversification.", "into one big picture and save. The code is well commented to paliate", "file gladiator/signatures.txt # It's pretty long (5 minutes) and should only be done", "reconstruct. This could be any image but out of # simplicity we choose", "COMPUTE THE SIGNATURES OF THE REGIONS nh = nw = 60 image_signatures =", "At the same time it computes # the signatures of the frames and", "wich frames of the movie match best with the different regions of the", "in file gladiator/signatures.txt # It's pretty long (5 minutes) and should only be", "the picture to reconstruct. The algorithm also ensures that many different frames are", "The algorithm also ensures that many different frames are used. 5. Assemble the", "the selected best-matching frames into one big picture and save. The code is", "this frame into subregions and compute the signature of each region. 4. 
Run", "(50Mo) which can then be # downsized by converting it to JPEG. best_matches_to_image(\"%s.png\"%foldername,", "computes # the signatures of the frames and store them in file gladiator/signatures.txt", "to # reconstruct, each frame and each region will be reduced to Nh", "colors are computed. Here we choose 3 x 3. # The resulting set", "the picture to # reconstruct, each frame and each region will be reduced", "and store them in file gladiator/signatures.txt # It's pretty long (5 minutes) and", "goes in five steps: 1. Extract one frame every 5 second of the", "Here we choose 3 x 3. # The resulting set of 9 colors", "then you can # comment it out if you want to fine-tune the", "'signatures' 2. Extract one special frame (the one to be reconstructed) from the", "simplicity we choose one frame frame of the movie, so that it will", "This script goes in five steps: 1. Extract one frame every 5 second", "from pompei import (movie_to_folder, get_image_signatures_from_folder, compute_signatures_in_image, find_best_matches, best_matches_to_image) # When comparing the frames", "algorithm will iteratively diversify the frames # used until the most used frames", "Gladiator using 1100+ frames of the movie. http://i.imgur.com/Eoglcof.jpg This script goes in five", "= \"gladiator\" # name of the folder for the frame pictures filename =", "of # simplicity we choose one frame frame of the movie, so that", "= nw = 60 image_signatures = compute_signatures_in_image(image, signatures_nh, signatures_nw, nh, nw) ### STEP", "= 'gladiator.flv' # the video file, from a legally-baught DVD # The next", "typical script to reconstruct one frame of a movie using a mosaic of", "the mean colors are computed. Here we choose 3 x 3. # The", "5 - ASSEMBLE THE FRAMES INTO ONE BIG PNG FILE # This produces", "For this example we treat gladiator. The result is this mosaic # http://i.imgur.com/Eoglcof.jpg", "less. 
# npasses=3000 tells the algorithm to give up after 3000 iterations if", "the algorithm will iteratively diversify the frames # used until the most used", "SPLIT THE IMAGE AND COMPUTE THE SIGNATURES OF THE REGIONS nh = nw", "frame every 5 seconds resize_factor=0.2, # downsize all frames of a factor 1/5", "over-diversification. best_matches = find_best_matches(image_signatures, image_folder_signatures, npasses=3000,goal=5) ### STEP 5 - ASSEMBLE THE FRAMES", "Extract one frame every 5 second of the movie. Compute their 'signatures' 2.", "different regions of the picture to reconstruct. The algorithm also ensures that many", "nh, nw) ### STEP 4 - FIND THE BEST-MATCHING FRAMES. OPTIMIZE. # This", "script to reconstruct one frame of a movie using a mosaic of other", "MOVIE # For this example we treat gladiator. The result is this mosaic", "frames are used. 5. Assemble the selected best-matching frames into one big picture", "5. Assemble the selected best-matching frames into one big picture and save. The", "This step is quite quick because we work with signatures (i.e. reduced #", "- ASSEMBLE THE FRAMES INTO ONE BIG PNG FILE # This produces the", "# name of the folder for the frame pictures filename = 'gladiator.flv' #", "will take long and produce a heavy PNG (50Mo) which can then be", "you want to fine-tune the parameters in the next lines. image_folder_signatures = movie_to_folder(filename,", "time it computes # the signatures of the frames and store them in", "take one frame every 5 seconds resize_factor=0.2, # downsize all frames of a", "STEP 2 - READING THE IMAGE TO BE RECONSTRUCTED # Now we load", "that the algorithm will iteratively diversify the frames # used until the most", "only be done once, then you can # comment it out if you", "Maximus in Gladiator using 1100+ frames of the movie. http://i.imgur.com/Eoglcof.jpg This script goes", "# reconstruct, each frame and each region will be reduced to Nh x", "computed. Here we choose 3 x 3. 
# The resulting set of 9", "signatures_nh=3 signatures_nw=3 ### STEP 1 - EXTRACTING THE FRAMES OF THE MOVIE #", "each frame, already computed at the previous step. image_folder_signatures = get_image_signatures_from_folder(foldername) ### STEP", "this picture of general Maximus in Gladiator using 1100+ frames of the movie.", "http://i.imgur.com/Eoglcof.jpg foldername = \"gladiator\" # name of the folder for the frame pictures", "# version of the images. # The algorithm first attributes to each region", "region will be reduced to Nh x Nw # zones from which the", "IMAGE TO BE RECONSTRUCTED # Now we load the image to reconstruct. This", "first attributes to each region of the final picture the movie # frame", "are computed. Here we choose 3 x 3. # The resulting set of", "compute the signature of each region. 4. Run an algorithm to find (using", "paliate for the lack of documentation. For more, see the functions doctrings. \"\"\"", "also ensures that many different frames are used. 5. Assemble the selected best-matching", "the frames that will compose it. # We take the scene just before", "produce a heavy PNG (50Mo) which can then be # downsized by converting", "This is a typical script to reconstruct one frame of a movie using", "the same time it computes # the signatures of the frames and store", "the frames # used until the most used frames is used 5 times", "movie match best with the different regions of the picture to reconstruct. The", "functions doctrings. \"\"\" from pompei import (movie_to_folder, get_image_signatures_from_folder, compute_signatures_in_image, find_best_matches, best_matches_to_image) # When", "reduced # version of the images. # The algorithm first attributes to each", "many different frames are used. 5. Assemble the selected best-matching frames into one", "frames of a factor 1/5 signatures_nh=signatures_nh, signatures_nw=signatures_nw, subclip=(5*60,-10*60)) # cut 5-10 minutes to", "final picture the movie # frame that matches best. 
Some frames will be", "Split this frame into subregions and compute the signature of each region. 4.", "signatures_nw, nh, nw) ### STEP 4 - FIND THE BEST-MATCHING FRAMES. OPTIMIZE. #", "we load the image to reconstruct. This could be any image but out", "of the movie match best with the different regions of the picture to", "frame of a movie using a mosaic of other frames with the Python", "THE IMAGE AND COMPUTE THE SIGNATURES OF THE REGIONS nh = nw =", "the signatures of each frame, already computed at the previous step. image_folder_signatures =", "import (movie_to_folder, get_image_signatures_from_folder, compute_signatures_in_image, find_best_matches, best_matches_to_image) # When comparing the frames of the", "- EXTRACTING THE FRAMES OF THE MOVIE # For this example we treat", "to be reconstructed) from the movie. 3. Split this frame into subregions and", "the images. # The algorithm first attributes to each region of the final", "diversify the frames # used until the most used frames is used 5", "the signatures of the frames and store them in file gladiator/signatures.txt # It's", "numpy array. ### STEP 3 - SPLIT THE IMAGE AND COMPUTE THE SIGNATURES", "region. 4. Run an algorithm to find (using the signatures) wich frames of", "well commented to paliate for the lack of documentation. For more, see the", "be # good sometimes to avoid over-diversification. best_matches = find_best_matches(image_signatures, image_folder_signatures, npasses=3000,goal=5) ###", "set of 9 colors is called the signature of the region/frame. signatures_nh=3 signatures_nw=3", "image_signatures = compute_signatures_in_image(image, signatures_nh, signatures_nw, nh, nw) ### STEP 4 - FIND THE", "dimensions as the frames that will compose it. # We take the scene", "This produces the final picture: gladiator.png # This will take long and produce", "# cut 5-10 minutes to avoid credits. # Get the signatures of each", "in Gladiator using 1100+ frames of the movie. 
http://i.imgur.com/Eoglcof.jpg This script goes in", "be reduced to Nh x Nw # zones from which the mean colors", "result is this mosaic # http://i.imgur.com/Eoglcof.jpg foldername = \"gladiator\" # name of the", "because we work with signatures (i.e. reduced # version of the images. #", "4 - FIND THE BEST-MATCHING FRAMES. OPTIMIZE. # This step is quite quick", "# take one frame every 5 seconds resize_factor=0.2, # downsize all frames of", "lower npasses (like npasses=100) can be # good sometimes to avoid over-diversification. best_matches", "foldername = \"gladiator\" # name of the folder for the frame pictures filename", "of the movie. http://i.imgur.com/Eoglcof.jpg This script goes in five steps: 1. Extract one", "60 image_signatures = compute_signatures_in_image(image, signatures_nh, signatures_nw, nh, nw) ### STEP 4 - FIND", "Choosing a lower npasses (like npasses=100) can be # good sometimes to avoid", "compute_signatures_in_image(image, signatures_nh, signatures_nw, nh, nw) ### STEP 4 - FIND THE BEST-MATCHING FRAMES.", "algorithm to give up after 3000 iterations if it # cannot reach its", "3 x 3. # The resulting set of 9 colors is called the", "INTO ONE BIG PNG FILE # This produces the final picture: gladiator.png #", "that many different frames are used. 5. Assemble the selected best-matching frames into", "regions of the picture to # reconstruct, each frame and each region will", "# It's pretty long (5 minutes) and should only be done once, then", "reconstructed) from the movie. 3. Split this frame into subregions and compute the", "gladiator.png # This will take long and produce a heavy PNG (50Mo) which", "# used until the most used frames is used 5 times or less.", "at the previous step. image_folder_signatures = get_image_signatures_from_folder(foldername) ### STEP 2 - READING THE", "the movie to the regions of the picture to # reconstruct, each frame", "quick because we work with signatures (i.e. 
reduced # version of the images.", "npasses (like npasses=100) can be # good sometimes to avoid over-diversification. best_matches =", "npasses=3000,goal=5) ### STEP 5 - ASSEMBLE THE FRAMES INTO ONE BIG PNG FILE", "file, from a legally-baught DVD # The next call extracts the frames from", "# This produces the final picture: gladiator.png # This will take long and", "of the region/frame. signatures_nh=3 signatures_nw=3 ### STEP 1 - EXTRACTING THE FRAMES OF", "frames # used until the most used frames is used 5 times or", "image_folder_signatures, npasses=3000,goal=5) ### STEP 5 - ASSEMBLE THE FRAMES INTO ONE BIG PNG", "which can then be # downsized by converting it to JPEG. best_matches_to_image(\"%s.png\"%foldername, best_matches,", "used frames is used 5 times or less. # npasses=3000 tells the algorithm", "used 5 times or less. # npasses=3000 tells the algorithm to give up", "movie to the regions of the picture to # reconstruct, each frame and", "is called the signature of the region/frame. signatures_nh=3 signatures_nw=3 ### STEP 1 -", "# Now we load the image to reconstruct. This could be any image", "second of the movie. Compute their 'signatures' 2. Extract one special frame (the", "seconds resize_factor=0.2, # downsize all frames of a factor 1/5 signatures_nh=signatures_nh, signatures_nw=signatures_nw, subclip=(5*60,-10*60))", "ensures that many different frames are used. 5. Assemble the selected best-matching frames", "are used. 5. Assemble the selected best-matching frames into one big picture and", "the frame pictures filename = 'gladiator.flv' # the video file, from a legally-baught", "final picture: gladiator.png # This will take long and produce a heavy PNG", "frames is used 5 times or less. # npasses=3000 tells the algorithm to", "signatures_nw=3 ### STEP 1 - EXTRACTING THE FRAMES OF THE MOVIE # For", "one frame frame of the movie, so that it will have the #", "DVD # The next call extracts the frames from the movie. 
At the", "to avoid over-diversification. best_matches = find_best_matches(image_signatures, image_folder_signatures, npasses=3000,goal=5) ### STEP 5 - ASSEMBLE", "with the Python package Pompei. It generates this picture of general Maximus in", "find_best_matches, best_matches_to_image) # When comparing the frames of the movie to the regions", "STEP 1 - EXTRACTING THE FRAMES OF THE MOVIE # For this example", "Some frames will be used more than once. # Then, goal=5 means that", "Nh x Nw # zones from which the mean colors are computed. Here", "treat gladiator. The result is this mosaic # http://i.imgur.com/Eoglcof.jpg foldername = \"gladiator\" #", "movie. At the same time it computes # the signatures of the frames", "5 seconds resize_factor=0.2, # downsize all frames of a factor 1/5 signatures_nh=signatures_nh, signatures_nw=signatures_nw,", "RECONSTRUCTED # Now we load the image to reconstruct. This could be any", "documentation. For more, see the functions doctrings. \"\"\" from pompei import (movie_to_folder, get_image_signatures_from_folder,", "movie. 3. Split this frame into subregions and compute the signature of each", "picture: gladiator.png # This will take long and produce a heavy PNG (50Mo)", "attributes to each region of the final picture the movie # frame that", "commented to paliate for the lack of documentation. For more, see the functions" ]
[ "spot check 2% of a file of more than 200 variants. :param mafFile:", "in a if k==i]) # if checkIt==1: UpdateProgress(count, len(a), \"INFO: Verifying maf file\")", "the reference sequence + 1 preceding base for the DEL refAnchorPos = str(int(pos)", "\"NA\"][0] total_reads = str(int(ref_reads) + int(alt_reads)) vaf = str(int(alt_reads)/float(total_reads)) elif i_t_vaf!=\"\" and i_t_vaf!=\"NA\"", "= '.' elif (i_t_vaf==\"\" or i_t_vaf==\"NA\") and ref_reads == 'NA' and iref_reads=='NA' and", "':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Last check for interesting but unresolved MAF", "== False: if Options.verbose: print(\"ERROR: %s\"%(line)) sys.exit(\"ERROR: Problem in id column\") # Strand", "ref_reads == 'NA' and iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA': GT='./.' ref_reads='.'", "lineOut = [chrom, vcfPos, rsid, vcfRef, vcfAlt, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField]", "within the MAF file. elif ref_reads == 'NA' or alt_reads == 'NA' and", "Options, fileLength): ''' Randomly samples the file to ensure proper reference file is", "and line.startswith('Hugo_Symbol Chromosome Start_position') == False: print(\"\") print(\"ERROR: No header found in maf", "8 March 2018 ''' import os import sys from optparse import OptionParser import", "'1': GT = \"1/1\" # Appears to be homozygous for alternative allele (germline", "genomicPos, ref) if count == len(a): print('') return(toContinue) # else: # print(checkIt) #", "# Obtain the reference sequence + 1 preceding base for the DEL refAnchorPos", "= tAllele2 else: altAllele = tAllele2 refAllele = tAllele1 ref_reads = line[39] alt_reads", "function to wrap around a function. 
''' @wraps(function) def function_timer(*args, **kwargs): t0 =", "'NA' or alt_reads == 'NA' and reportedVAF == '1': GT = \"1/1\" #", "tAllele1 refAllele = tAllele2 else: altAllele = tAllele2 refAllele = tAllele1 ref_reads =", "= line[9] # Alt Allele QUAL = line[42] if QUAL == 'None' or", "choosing to filter out as possible... if Options.verbose: print(\"WARNING: Malformed MAF entry. %s\"%('\\t'.join(line)))", "QUAL == 'NA' or QUAL == '': QUAL = '.' if ref ==", ":param genomicPos: Genomic Position of interest. :param ref: Reference sequence to compare to", "# Appears to be homozygous for alternative allele (germline unlikely since it is", "-r <ref.fa>' parser = OptionParser(usage) parser.add_option('-i', '--input_maf', dest=\"maf\", default=None, help=\".maf file to be", ":param i: Current Step :param n: Total number of steps. :param DisplayText: A", "if ref_reads == 'NA': ref_reads = '.' total_reads = alt_reads else: alt_reads =", "None if variantType==\"SNP\": linetowrite = processSNP(line, chrom, pos, rsid, mutType, variantType, strand, errorFile,", "= toPullIDs[12] normalID = toPullIDs[13] count = 0 i = 0 with open(Options.maf,", "(function.__name__, str(round((t1-t0)/60.,2))) ) return result return function_timer def UpdateProgressGetN(fileName): if fileName[len(fileName)-1]==\"z\": cmd =", "pos, rsid, mutType, variantType, strand, errorFile, Options) elif variantType==\"DEL\": linetowrite = processDEL(line, chrom,", "print(\"\") print(\"ERROR: No header found in maf file.\") elif line.startswith('Hugo_Symbol Chromosome Start_position') ==", "= tAllele2 else: altAllele = tAllele2 refAllele = tAllele1 # Obtain the reference", "function_timer(*args, **kwargs): t0 = time.time() result = function(*args, **kwargs) t1 = time.time() print", "# print([k for k in a]) # sys.exit(\"Problem here\") elif i != 0", "%s minutes\" % (function.__name__, str(round((t1-t0)/60.,2))) ) return result return function_timer def UpdateProgressGetN(fileName): if", "0 i = 0 with 
open(Options.maf, 'r') as inFile: with open(Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf'),", "+ int(alt_reads)) vaf = repr(round(int(alt_reads) / float(total_reads), 4)) if vaf != '1.' and", "\"wc -l %s\" % (fileName) pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout return(int(pipe.read().decode(\"utf-8\").lstrip(\" \").split(\" \")[0]))", "vaf = str(int(alt_reads)/float(total_reads)) elif i_t_vaf!=\"\" and i_t_vaf!=\"NA\" and ref_reads == 'NA' and iref_reads=='NA'", "CreateHeader(outVCF, Options, tumorID, normalID) for line in inFile: UpdateProgress(i, n, \"Processing Maf File\")", "'.' total_reads = '.' elif ( i_t_vaf == \"\" or i_t_vaf == \"NA\")", "file :param genomicPos: Genomic Position of interest. :param ref: Reference sequence to compare", "= '.' total_reads = '.' elif ( i_t_vaf == \"\" or i_t_vaf ==", "False: if Options.verbose: print(\"ERROR: %s\"%(line)) sys.exit(\"ERROR: Problem in id column\") # Strand Information", "variantType + \";MAF_Variant_Classification=\" + mutType + \";DCC_Project_Code=\" + \\ line[44] # Normal variant", "i_t_vaf!=\"NA\" and ref_reads == 'NA' and iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA':", "= line[10] if rsid == '': rsid = '.' elif rsid.startswith(\"rs\") == False:", "if check: if refSeq == ref: return(True) else: print('ERROR: May not be proper", "!= 'NA' or ialt_reads != 'NA'): GT = \"0/1\" ref_reads = [read for", "a progress bar where appropriate. :param i: Current Step :param n: Total number", "run time information about. :param function: Function of interest. :return: A function to", "== 'NA' and ialt_reads == 'NA': vaf = i_t_vaf GT = \"./.\" ref_reads", "subprocess from functools import wraps import datetime import time import numpy as np", "'.' alt_reads = '.' total_reads = '.' 
elif (i_t_vaf==\"\" or i_t_vaf==\"NA\") and ref_reads", "and determine reads for vaf==1 if (ref_reads != 'NA' or iref_reads != 'NA')", "= None if variantType==\"SNP\": linetowrite = processSNP(line, chrom, pos, rsid, mutType, variantType, strand,", "GT=\"0|1\" else: GT=\"0/1\" sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Last check", "# Get phasing information and determine reads for vaf==1 if ref_reads == 'NA'", "altAllele = tAllele2 else: altAllele = tAllele1 # Obtain the reference sequence +", "str(round((t1-t0)/60.,2))) ) return result return function_timer def UpdateProgressGetN(fileName): if fileName[len(fileName)-1]==\"z\": cmd = \"gzip", "GT = \"0/1\" ref_reads = [read for read in [ref_reads, iref_reads] if read", "Options.verbose: print(\"WARNING: %s\" % '\\t'.join(line)) return(None) # Create INFO field INFO = \"MAF_Hugo_Symbol=\"", "vcfPos = refAnchorPos # Get read information iref_reads = line[37] ialt_reads = line[36]", "'NA' and reportedVAF == 'NA': with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') if Options.verbose:", "used. Will spot check 2% of a file of more than 200 variants.", "rsid = line[10] if rsid == '': rsid = '.' elif rsid.startswith(\"rs\") ==", "'NA' or QUAL == '': QUAL = '.' if tAllele1 == '-': altAllele", "== '': QUAL = '.' 
if ref == tAllele1: altAllele = tAllele1 refAllele", "mutType, variantType, strand, errorFile, Options): ref = line[7] tAllele1 = line[8] # Normal", "+ \";MAF_Variant_Type=\" + variantType + \";MAF_Variant_Classification=\" + mutType +\";DCC_Project_Code=\" + line[44] # Normal", "has base directly preceding the deletion as the alternative base and the variant", "= ref_reads sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Tossing these very", "refAllele = tAllele1 ref_reads = line[39] alt_reads = line[38] reportedVAF = line[28] #", "ialt_reads] if read != \"NA\"][0] total_reads = str(int(ref_reads) + int(alt_reads)) vaf = str(int(alt_reads)", "seem duplicitious, but I explicityly want to know as much of what I'm", "ialt_reads == 'NA': vaf = i_t_vaf GT = \"./.\" ref_reads = '.' alt_reads", "= 0 with open(Options.maf, 'r') as inFile: with open(Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf'), 'w') as", "Problem in id column\") # Strand Information strand = line[4] # Variant Classification/Type", "Allele Typically tAllele2 = line[9] # Alt Allele Typically QUAL = line[42] if", "to get run time information about. :param function: Function of interest. :return: A", "def ProcessFile(Options): n = UpdateProgressGetN(Options.maf) if Options.spotcheck: with open(Options.maf, 'r') as inFile: SpotCheckProperReference(inFile,", "base for the DEL refAnchorPos = str(int(pos) - 1) # Fetch the base", "Options, tumorID, normalID) for line in inFile: UpdateProgress(i, n, \"Processing Maf File\") if", "refSeq[0] # VCF has base directly preceding the deletion as the alternative base", "reads for vaf==1 if ref_reads == 'NA' or alt_reads == 'NA' and reportedVAF", "== \"NA\": normalGenotype = \".:.,.:.:.\" else: normalGenotype = \".:.,.:.:%s\" % (line[41]) lineOut =", "around a function. ''' @wraps(function) def function_timer(*args, **kwargs): t0 = time.time() result =", "reference sequence. 
''' proc = subprocess.Popen(['samtools','faidx',refGenome, genomicPos], stdout=subprocess.PIPE) proc.wait() outInfo = proc.stdout.readlines() refSeq", "Typically tAllele2 = line[9] # Alt Allele Typically QUAL = line[42] if QUAL", "print(checkIt) # print(line) # print([k for k in a]) # sys.exit(\"Problem here\") elif", "Normal variant field if anything if line[41] == \"NA\": normalGenotype = \".:.,.:.:.\" else:", "+ line[14] + \";MAF_Variant_Type=\" + variantType + \";MAF_Variant_Classification=\" + mutType +\";DCC_Project_Code=\" + line[44]", "if line.startswith('Hugo_Symbol Chromosome Start_position'): count+=1 i += 1 else: i += 1 linetoWrite", "''' proc = subprocess.Popen(['samtools','faidx',refGenome, genomicPos], stdout=subprocess.PIPE) proc.wait() outInfo = proc.stdout.readlines() refSeq = ''.join([line.decode('utf-8').rstrip('\\n')", "line in inFile: UpdateProgress(i, n, \"Processing Maf File\") if line.startswith('Hugo_Symbol Chromosome Start_position'): count+=1", "SNP, INS, DEL, etc.) mutType = line[5] variantType = line[6] # Create proper", "the order listed\\\">\\n\") ioObject.write(\"##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\\\"Total read depth across this site\\\">\\n\") ioObject.write(\"##FORMAT=<ID=VF,Number=1,Type=Float,Description=\\\"Variant Allele Frequency.\\\">\\n\") ioObject.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\t%s\\t%s\\n\"%(normalID,tumorID))", "line in outInfo[1:]]) if check: if refSeq == ref: return(True) else: print('ERROR: May", "int(alt_reads)) vaf = str(int(alt_reads)/float(total_reads)) elif i_t_vaf!=\"\" and i_t_vaf!=\"NA\" and ref_reads == 'NA' and", "Classification/Type (Type is SNP, INS, DEL, etc.) mutType = line[5] variantType = line[6]", "errerOut.write('\\t'.join(line)+'\\n') else: # This may seem duplicitious, but I explicityly want to know", "to filter out as possible... if Options.verbose: print(\"WARNING: Malformed MAF entry. 
%s\"%('\\t'.join(line))) print('')", "'NA' or ialt_reads != 'NA'): GT = \"0/1\" ref_reads = [read for read", "%s\" % ('\\t'.join(line))) sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Create INFO", "with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') # print(line) # sys.exit(\"ERROR: Malformed MAF entry.\")", "= str(int(ref_reads) + int(alt_reads)) vaf = repr(round(int(alt_reads) / float(total_reads), 4)) if vaf !=", "'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') else: # This may seem duplicitious, but I explicityly", "# print(checkIt) # print(line) # print([k for k in a]) # sys.exit(\"Problem here\")", "tAllele2 refAllele = tAllele1 ref_reads = line[39] alt_reads = line[38] reportedVAF = line[28]", "if read != \"NA\"][0] alt_reads = [read for read in [alt_reads, ialt_reads] if", "is None or options.outDir is None or options.refGenome is None: print(\"ERROR: Please include", "reference matching to maf file. Default=False\") parser.add_option('-v', '--verbose', dest='verbose', default=False, action='store_true', help=\"Use this", "1 preceding base for the DEL refAnchorPos = str(int(pos) - 1) # Fetch", "check if needed. :param check: Whether or not to throw error if the", "'None' or QUAL == 'NA' or QUAL == '': QUAL = '.' if", "def main(): print(\"INFO: Processing MAF file.\") FilePath = os.path.dirname(os.path.abspath(__file__)) (Options, Parser) = OptionParsing()", "alternative allele does not match reference sequence. %s\" % ('\\t'.join(line))) sys.exit() # VCF", "that precedes the deletion. refSeq = SamtoolsFaidx(Options.refGenome, chrom + \":\" + refAnchorPos +", "for line in inFile: if i == 1: toPullIDs = line.rstrip('\\n').split('\\t') break else:", "% '\\t'.join(line)) return(None) # Create INFO field INFO = \"MAF_Hugo_Symbol=\" + line[0] +", "refGenome: Reference Fasta file :param genomicPos: Genomic Position of interest. 
:param ref: Reference", "refSeq = ''.join([line.decode('utf-8').rstrip('\\n') for line in outInfo[1:]]) if check: if refSeq == ref:", "ioObject.write(\"##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\\\"Total read depth across this site\\\">\\n\") ioObject.write(\"##FORMAT=<ID=VF,Number=1,Type=Float,Description=\\\"Variant Allele Frequency.\\\">\\n\") ioObject.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\t%s\\t%s\\n\"%(normalID,tumorID)) @fn_timer def ProcessFile(Options):", "total_reads, vaf]) # Create INFO field INFO = \"MAF_Hugo_Symbol=\" + line[0] + \";MAF_ref_context=\"", "= '.' # Determine type of variant to continue processing. linetowrite = None", "len(a), \"INFO: Verifying maf file\") count+=1 line = line.rstrip('\\n').split('\\t') genomicPos = line[1] +", "if ref_reads == 'NA' or alt_reads == 'NA' and reportedVAF == '1': GT", "or QUAL == '': QUAL = '.' if ref == tAllele1: altAllele =", "t0 = time.time() result = function(*args, **kwargs) t1 = time.time() print (\"INFO: Total", "pos, rsid, mutType, variantType, strand, errorFile, Options) elif variantType==\"INS\": linetowrite = processINS(line, chrom,", "Get phasing information and determine reads for vaf==1 if (ref_reads != 'NA' or", "maf2vcf conversion.\") parser.add_option('-s', '--spotCheckMaf', dest='spotcheck', default=False, action='store_true', help=\"Use this flag to verify reference", "parser.add_option('-s', '--spotCheckMaf', dest='spotcheck', default=False, action='store_true', help=\"Use this flag to verify reference matching to", "total_reads = alt_reads else: alt_reads = '.' total_reads = ref_reads sampleField = ':'.join([GT,", "in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Classification,Number=1,Type=String,Description=\\\"Variant Classification (if SNP) in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=DCC_Project_Code,Number=1,Type=String,Description=\\\"DCC", "= '.' total_reads = '.' 
elif (i_t_vaf==\"\" or i_t_vaf==\"NA\") and ref_reads == 'NA'", "preceding base in the insertion in MAF vcfRef = refSeq[0] # VCF has", "or ialt_reads != 'NA'): GT = \"0/1\" ref_reads = [read for read in", "check for interesting but unresolved MAF line if (ref != tAllele1 and ref", "1) # Fetch the base that precedes the deletion. refSeq = SamtoolsFaidx(Options.refGenome, chrom", "rsid == '': rsid = '.' elif rsid.startswith(\"rs\") == False: if Options.verbose: print(\"ERROR:", "print('') return(toContinue) def processSNP(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options): ref", "== 'NA' and alt_reads == 'NA' and ialt_reads == 'NA': GT = './.'", "= line.rstrip('\\n').split('\\t') genomicPos = line[1] + \":\" + line[2] + \"-\" + line[3]", "GT = \"1/1\" # Appears to be homozygous for alternative allele (germline unlikely", "and i_t_vaf!=\"NA\" and ref_reads == 'NA' and iref_reads=='NA' and alt_reads == 'NA' and", "== 'NA': GT = './.' ref_reads = '.' alt_reads = '.' total_reads =", "mafFile: Input mafFile object (opened) :param Options: Parser Options :param fileLength: Length of", "i == 1: toPullIDs = line.rstrip('\\n').split('\\t') break else: header = line i+=1 tumorID", "ioObject.write(\"##FORMAT=<ID=VF,Number=1,Type=Float,Description=\\\"Variant Allele Frequency.\\\">\\n\") ioObject.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\t%s\\t%s\\n\"%(normalID,tumorID)) @fn_timer def ProcessFile(Options): n = UpdateProgressGetN(Options.maf) if Options.spotcheck: with", "the DEL refAnchorPos = str(int(pos) - 1) # Fetch the base that precedes", "ioObject.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\t%s\\t%s\\n\"%(normalID,tumorID)) @fn_timer def ProcessFile(Options): n = UpdateProgressGetN(Options.maf) if Options.spotcheck: with open(Options.maf, 'r') as", "== 'NA' and iref_reads == 'NA' and alt_reads == 'NA' and ialt_reads ==", "= [read for read in [ref_reads, iref_reads] if read != \"NA\"][0] 
alt_reads =", "return(toContinue) def processSNP(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options): ref =", "tAllele2) or (strand != '+' and strand != '-'): with open(errorFile, 'a') as", "and iref_reads == 'NA' and alt_reads == 'NA' and ialt_reads == 'NA': vaf", "import subprocess from functools import wraps import datetime import time import numpy as", "if fileName[len(fileName)-1]==\"z\": cmd = \"gzip -cd %s | wc -l\" % (fileName) pipe", "* int(20 * j), 100 * j, DisplayText)) sys.stdout.flush() def SamtoolsFaidx(refGenome, genomicPos, ref='',", "= line[43] # Get phasing information and determine reads for vaf==1 if (ref_reads", "preceding base plus the reported deletion in the MAF file. vcfRef = refSeq", "optparse import OptionParser import subprocess from functools import wraps import datetime import time", "found in maf file.\") elif line.startswith('Hugo_Symbol Chromosome Start_position') == True: toContinue = True", "directory for .vcf file\") parser.add_option('-r', '--ref_genome', dest=\"refGenome\", default=\"/Users/schencro/Desktop/Bioinformatics_Tools/Ref_Genomes/Ensembl/GRCh37.75/GRCh37.75.fa\", help=\"Reference genome to be used", "or not to throw error if the provided reference matches :param refGenome: Reference", "def function_timer(*args, **kwargs): t0 = time.time() result = function(*args, **kwargs) t1 = time.time()", "line[41]==\"NA\": normalGenotype = \".:.,.:.:.\" else: normalGenotype = \".:.,.:.:%s\"%(line[41]) # Final vcf line out", "tAllele1 and ref != tAllele2) or (strand != '+' and strand != '-'):", "!= 'NA') and (alt_reads != 'NA' or ialt_reads != 'NA'): GT = \"0/1\"", "print('ERROR: Improper reference. Found %s at %s. 
Reference genome shows %s' % (ref,", "for k in a if k==i]) # if checkIt==1: UpdateProgress(count, len(a), \"INFO: Verifying", "(ref != tAllele1 and ref != tAllele2) or (strand != '+' and strand", "sampleField] return(lineOut) def processINS(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options): ref", "+ refAnchorPos + \"-\" + line[3], check=False) # VCF reference is the preceding", "mutType, variantType, strand, errorFile, Options) elif variantType==\"TNP\" or variantType==\"ONP\": with open(errorFile, 'a') as", "outVCF: errorFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/')) - 1].replace('.maf', '.ignoredSNVs.maf') with open(errorFile, 'w') as", "Options): ref = line[7] tAllele1 = line[8] # Normal Allele tAllele2 = line[9]", "stdout=subprocess.PIPE).stdout return(int(pipe.read().decode(\"utf-8\").lstrip(\" \").split(\" \")[0])) def UpdateProgress(i, n, DisplayText): ''' Prints a progress bar", "+ \";DCC_Project_Code=\" + \\ line[44] # Normal variant field if anything if line[41]", "refAnchorPos + \"-\" + line[3], check=False) # VCF reference is the preceding base", "pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout return(int(pipe.read().decode(\"utf-8\").lstrip(\" \").split(\" \")[0])) def UpdateProgress(i, n, DisplayText): '''", "elif variantType==\"DEL\": linetowrite = processDEL(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options)", "variantType + \";MAF_Variant_Classification=\" + mutType +\";DCC_Project_Code=\" + line[44] # Normal variant field if", "QUAL = line[42] if QUAL == 'None' or QUAL == 'NA' or QUAL", "# Tossing these very strange mutations within the MAF file. 
elif ref_reads ==", "errorFile, Options) if linetoWrite is not None: outVCF.write('\\t'.join(linetoWrite)+'\\n') print('') print(\"INFO: Sorting vcf file.\")", "-k2,2n\\\"}' | gzip > %s\"%(vcfFile, vcfFileSorted)) os.system(\"rm %s\"%(vcfFile)) os.system(\"gzip %s\"%(errorFile)) def main(): print(\"INFO:", "reference sequence + 1 preceding base for the DEL refAnchorPos = str(int(pos) -", "errorFile, Options) elif variantType==\"TNP\" or variantType==\"ONP\": with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') else:", "as errerOut: errerOut.write('\\t'.join(line)+'\\n') # print(line) # sys.exit(\"ERROR: Malformed MAF entry.\") return(linetowrite) def CreateHeader(ioObject,", "| gzip > %s\"%(vcfFile, vcfFileSorted)) os.system(\"rm %s\"%(vcfFile)) os.system(\"gzip %s\"%(errorFile)) def main(): print(\"INFO: Processing", "SamtoolsFaidx(refGenome, genomicPos, ref='', check=True): ''' Obtain reference sequence and perform check if needed.", "== 1: toPullIDs = line.rstrip('\\n').split('\\t') break else: header = line i+=1 tumorID =", "DisplayText)) sys.stdout.flush() def SamtoolsFaidx(refGenome, genomicPos, ref='', check=True): ''' Obtain reference sequence and perform", "file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Genome_Change,Number=1,Type=String,Description=\\\"Genome change in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Type,Number=1,Type=String,Description=\\\"Variant type (SNP,INS,DEL) in original MAF", "a if k==i]) # if checkIt==1: UpdateProgress(count, len(a), \"INFO: Verifying maf file\") count+=1", "= '.' if variantType == '': variantType = '.' 
# Determine type of", "default=None, help=\".maf file to be converted.\") parser.add_option('-o', '--output_dir', dest=\"outDir\", default=None, help=\"Output directory for", "= \".:.,.:.:%s\"%(line[41]) # Final vcf line out lineOut = [chrom, pos, rsid, refAllele,", "the insertion in MAF vcfRef = refSeq[0] # VCF has base directly preceding", "the alternative base and the variant pos vcfAlt=refSeq[0] vcfPos=refAnchorPos # Get read information", "want to print out that is informative. :return: None ''' sys.stdout.write('\\r') j =", "iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA': vaf=i_t_vaf GT=\"./.\" ref_reads = '.' alt_reads", "variant pos vcfAlt=refSeq[0] vcfPos=refAnchorPos # Get read information iref_reads = line[37] ialt_reads =", "= \"MAF_Hugo_Symbol=\" + line[0] + \";MAF_ref_context=\" + line[15].upper() + \";MAF_Genome_Change=\" + line[14] +", "CreateVCFLine(line, errorFile, Options) if linetoWrite is not None: outVCF.write('\\t'.join(linetoWrite)+'\\n') print('') print(\"INFO: Sorting vcf", "elif variantType==\"INS\": linetowrite = processINS(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options)", "sequence + 1 preceding base for the DEL refAnchorPos = str(int(pos) - 1)", "str(int(ref_reads) + int(alt_reads)) vaf = str(int(alt_reads)/float(total_reads)) elif i_t_vaf!=\"\" and i_t_vaf!=\"NA\" and ref_reads ==", "OptionParsing(): usage = 'usage: %prog -i <*.maf> -o <directory> -r <ref.fa>' parser =", "wraps import datetime import time import numpy as np def OptionParsing(): usage =", "plus the reported deletion in the MAF file. vcfRef = refSeq # VCF", "[chrom, vcfPos, rsid, vcfRef, vcfAlt, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut) def", "ref: return(True) else: print('ERROR: May not be proper reference genome') print('ERROR: Improper reference.", "of any function you want to get run time information about. 
:param function:", "= tAllele2 else: altAllele = tAllele1 # Obtain the reference sequence + 1", "# Normal Allele Typically tAllele2 = line[9] # Alt Allele Typically QUAL =", "sys.stdout.flush() def SamtoolsFaidx(refGenome, genomicPos, ref='', check=True): ''' Obtain reference sequence and perform check", "errorFile, Options): ref = line[7] tAllele1 = line[8] # Normal Allele Typically tAllele2", "line[1] + \":\" + line[2] + \"-\" + line[3] ref = line[7] mutType", "if anything if line[41]==\"NA\": normalGenotype = \".:.,.:.:.\" else: normalGenotype = \".:.,.:.:%s\"%(line[41]) # Final", "== '': QUAL = '.' if tAllele1 == '-': altAllele = tAllele2 else:", "== 'None' or QUAL == 'NA' or QUAL == '': QUAL = '.'", "\";MAF_ref_context=\" + line[15].upper() + \";MAF_Genome_Change=\" + line[ 14] + \";MAF_Variant_Type=\" + variantType +", "= 0 count = 0 for line in mafFile: if i != 0", "i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False: print(\"\") print(\"ERROR: No header", "to 1 if ref_reads == 'NA': ref_reads = '.' total_reads = alt_reads else:", "\"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut) def processDEL(line, chrom, pos, rsid, mutType, variantType, strand, errorFile,", "vaf]) # Last check for interesting but unresolved MAF line if (ref !=", "alt_reads = '.' total_reads = ref_reads sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf])", "using python >=3.6. Created by <NAME> 8 March 2018 ''' import os import", "MAF vcfRef = refSeq[0] # VCF has base directly preceding the deletion as", "tAllele2 = line[9] # Alt Allele QUAL = line[42] if QUAL == 'None'", "None: print(\"ERROR: Please include arguments for maf file, output directory, and reference genome", "str(int(pos)-1) # Fetch the base that precedes the deletion. 
refSeq = SamtoolsFaidx(Options.refGenome, chrom", "# Normal Allele tAllele2 = line[9] # Alt Allele QUAL = line[42] if", "% '\\t'.join(line)) return(None) # Simple SNV cases else: total_reads = str(int(ref_reads) + int(alt_reads))", "print(line) # sys.exit(\"ERROR: Malformed MAF entry.\") return(linetowrite) def CreateHeader(ioObject, Options, tumorID, normalID): now", "ref == tAllele1: altAllele = tAllele1 refAllele = tAllele2 else: altAllele = tAllele2", "vcfFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf') vcfFileSorted = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.head.maf','.sorted.vcf.gz') os.system(\"cat %s |", "= line[38] reportedVAF = line[28] i_t_vaf = line[43] # Get phasing information and", "and ref != tAllele2) or (strand != '+' and strand != '-'): with", "and ialt_reads=='NA': GT='./.' ref_reads='.' alt_reads='.' total_reads='.' vaf='.' else: sys.exit(\"ERROR: Problem processing DEL %s\"%('\\t'.join(line)))", "def CreateHeader(ioObject, Options, tumorID, normalID): now = datetime.datetime.now() ioObject.write(\"##fileformat=VCFv4.2\\n\") ioObject.write(\"##fileDate=%s\\n\"%(now.date())) ioObject.write(\"##source=maf2vcf.py\\n\") ioObject.write(\"##reference=%s\\n\"%(Options.refGenome)) ioObject.write(\"##sampleColumns=Normal.Tumor\\n\")", "at %s. Reference genome shows %s' % (ref, genomicPos, refSeq)) sys.exit() return(None) else:", "%s | awk '$1 ~ /^#/ {print $0;next} {print $0 | \\\"LC_ALL=C sort", "total_reads = str(int(ref_reads) + int(alt_reads)) vaf = repr(round(int(alt_reads) / float(total_reads), 4)) if vaf", "help=\"Reference genome to be used for maf2vcf conversion.\") parser.add_option('-s', '--spotCheckMaf', dest='spotcheck', default=False, action='store_true',", "with open(Options.maf,'r') as inFile: i = 0 for line in inFile: if i", "to ensure proper reference is used. 
Will spot check 2% of a file", "/^#/ {print $0;next} {print $0 | \\\"LC_ALL=C sort -k1,1 -k2,2n\\\"}' | gzip >", "open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') else: # This may seem duplicitious, but I", "elif (i_t_vaf==\"\" or i_t_vaf==\"NA\") and ref_reads == 'NA' and iref_reads=='NA' and alt_reads ==", "vaf=i_t_vaf GT=\"./.\" ref_reads = '.' alt_reads = '.' total_reads = '.' elif (i_t_vaf==\"\"", "file\") count+=1 line = line.rstrip('\\n').split('\\t') genomicPos = line[1] + \":\" + line[2] +", "any function you want to get run time information about. :param function: Function", "toPullIDs[13] count = 0 i = 0 with open(Options.maf, 'r') as inFile: with", "if (ref_reads != 'NA' or iref_reads!='NA') and (alt_reads != 'NA' or ialt_reads!='NA'): GT=\"0/1\"", "+ \\ line[44] # Normal variant field if anything if line[41] == \"NA\":", "%s\"%(errorFile)) def main(): print(\"INFO: Processing MAF file.\") FilePath = os.path.dirname(os.path.abspath(__file__)) (Options, Parser) =", "vcf line out lineOut = [chrom, pos, rsid, refAllele, altAllele, QUAL, '.', INFO,", "mutType == '': mutType = '.' if variantType == '': variantType = '.'", "line[0] + \";MAF_ref_context=\" + line[15].upper() + \";MAF_Genome_Change=\" + line[14] + \";MAF_Variant_Type=\" + variantType", "if variantClass != \"INS\" and variantClass != \"TNP\" and variantClass !=\"ONP\": toContinue =", "print(line) # print([k for k in a]) # sys.exit(\"Problem here\") elif i !=", "Reference Fasta file :param genomicPos: Genomic Position of interest. 
:param ref: Reference sequence", "%s\"%(vcfFile, vcfFileSorted)) os.system(\"rm %s\"%(vcfFile)) os.system(\"gzip %s\"%(errorFile)) def main(): print(\"INFO: Processing MAF file.\") FilePath", "''' import os import sys from optparse import OptionParser import subprocess from functools", "normalGenotype, sampleField] return (lineOut) def CreateVCFLine(line, errorFile, Options): line = line.rstrip('\\n').split('\\t') # Genomic", "# VCF has base directly preceding the deletion as the alternative base and", "May not be proper reference genome') print('ERROR: Improper reference. Found %s at %s.", "directly preceding the deletion as the alternative base and the variant pos vcfAlt", "March 2018 ''' import os import sys from optparse import OptionParser import subprocess", "column\") # Strand Information strand = line[4] # Variant Classification/Type (Type is SNP,", "\":\" + line[2] + \"-\" + line[3] ref = line[7] mutType = line[5]", "= Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.head.maf','.sorted.vcf.gz') os.system(\"cat %s | awk '$1 ~ /^#/ {print $0;next}", "reference sequence and perform check if needed. :param check: Whether or not to", "if read != \"NA\"][0] total_reads = str(int(ref_reads) + int(alt_reads)) vaf = str(int(alt_reads) /", "information and determine reads for vaf==1 if (ref_reads != 'NA' or iref_reads !=", "open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') if Options.verbose: print(\"WARNING: %s\" % '\\t'.join(line)) return(None) #", "reference is used. Will spot check 2% of a file of more than", "vcfRef, vcfAlt, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut) def processINS(line, chrom, pos,", "as possible... if Options.verbose: print(\"WARNING: Malformed MAF entry. 
%s\"%('\\t'.join(line))) print('') with open(errorFile, 'a')", ":return: None ''' sys.stdout.write('\\r') j = (i + 1) / n sys.stdout.write(\"[%-20s] %d%%\\t", "genomicPos, ref='', check=True): ''' Obtain reference sequence and perform check if needed. :param", "# Simple SNV cases else: total_reads = str(int(ref_reads) + int(alt_reads)) vaf = repr(round(int(alt_reads)", "( i_t_vaf == \"\" or i_t_vaf == \"NA\") and ref_reads == 'NA' and", "vaf = i_t_vaf GT = \"./.\" ref_reads = '.' alt_reads = '.' total_reads", "errerOut: errerOut.write('\\t'.join(line)+'\\n') else: # This may seem duplicitious, but I explicityly want to", "= \"0/1\" ref_reads = [read for read in [ref_reads, iref_reads] if read !=", "tAllele1: altAllele = tAllele1 refAllele = tAllele2 else: altAllele = tAllele2 refAllele =", "== 'NA' and iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA': GT='./.' ref_reads='.' alt_reads='.'", "\";MAF_Genome_Change=\" + line[14] + \";MAF_Variant_Type=\" + variantType + \";MAF_Variant_Classification=\" + mutType +\";DCC_Project_Code=\" +", "INS, DEL, etc.) mutType = line[5] variantType = line[6] # Create proper vcf", "line[15].upper() + \";MAF_Genome_Change=\" + line[14] + \";MAF_Variant_Type=\" + variantType + \";MAF_Variant_Classification=\" + mutType", "print out that is informative. :return: None ''' sys.stdout.write('\\r') j = (i +", "'.' alt_reads = '.' total_reads = '.' vaf = '.' 
else: sys.exit(\"ERROR: Problem", "= \"wc -l %s\" % (fileName) pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout return(int(pipe.read().decode(\"utf-8\").lstrip(\" \").split(\"", "refAllele = tAllele2 else: altAllele = tAllele2 refAllele = tAllele1 ref_reads = line[39]", "np.random.shuffle(a) a = list(a[:int(fileLength*n)]) i = 0 count = 0 for line in", "= \"MAF_Hugo_Symbol=\" + line[0] + \";MAF_ref_context=\" + line[15].upper() + \";MAF_Genome_Change=\" + line[ 14]", "if Options.verbose: print(\"ERROR: %s\"%(line)) sys.exit(\"ERROR: Problem in id column\") # Strand Information strand", "# Get read information iref_reads = line[37] ialt_reads = line[36] ref_reads = line[39]", "entry.\") return(linetowrite) def CreateHeader(ioObject, Options, tumorID, normalID): now = datetime.datetime.now() ioObject.write(\"##fileformat=VCFv4.2\\n\") ioObject.write(\"##fileDate=%s\\n\"%(now.date())) ioObject.write(\"##source=maf2vcf.py\\n\")", "line.startswith('Hugo_Symbol Chromosome Start_position') == False: # checkIt = len([k for k in a", "if ref == tAllele1: altAllele = tAllele1 refAllele = tAllele2 else: altAllele =", "file.\\\">\\n\") ioObject.write(\"##INFO=<ID=DCC_Project_Code,Number=1,Type=String,Description=\\\"DCC Project Code in original MAF file.\\\">\\n\") ioObject.write(\"##FORMAT=<ID=GT,Number=1,Type=String,Description=\\\"Genotype\\\">\\n\") ioObject.write(\"##FORMAT=<ID=AD,Number=2,Type=Integer,Description=\\\"Allelic depths of REF", "Frequency.\\\">\\n\") ioObject.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\t%s\\t%s\\n\"%(normalID,tumorID)) @fn_timer def ProcessFile(Options): n = UpdateProgressGetN(Options.maf) if Options.spotcheck: with open(Options.maf, 'r')", "strand = line[4] # Variant Classification/Type (Type is SNP, INS, DEL, etc.) mutType", "information iref_reads = line[37] ialt_reads = line[36] ref_reads = line[39] alt_reads = line[38]", "proper reference genome') print('ERROR: Improper reference. 
Found %s at %s. Reference genome shows", "('\\t'.join(line))) sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Create INFO field INFO", "= line[8] # Normal Allele Typically tAllele2 = line[9] # Alt Allele Typically", "'NA' and alt_reads == 'NA' and ialt_reads == 'NA': GT = './.' ref_reads", "False: # checkIt = len([k for k in a if k==i]) # if", "the variant pos vcfAlt = refSeq[0]+altAllele vcfPos = refAnchorPos # Get read information", "open(errorFile, 'w') as errorOut: errorOut.write(header) CreateHeader(outVCF, Options, tumorID, normalID) for line in inFile:", "\".:.,.:.:%s\" % (line[41]) lineOut = [chrom, vcfPos, rsid, vcfRef, vcfAlt, QUAL, '.', INFO,", "tAllele2 else: altAllele = tAllele2 refAllele = tAllele1 # Obtain the reference sequence", "','.join([ref_reads, alt_reads]), total_reads, vaf]) # Last check for interesting but unresolved MAF line", "0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False: print(\"\") print(\"ERROR: No header found in", "'.' total_reads = ref_reads sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Tossing", "Options.maf.split('/')[len(Options.maf.split('/')) - 1].replace('.maf', '.ignoredSNVs.maf') with open(errorFile, 'w') as errorOut: errorOut.write(header) CreateHeader(outVCF, Options, tumorID,", "if Options.verbose: print(\"WARNING: %s\" % '\\t'.join(line)) return(None) # Create INFO field INFO =", "strand, errorFile, Options): ref = line[7] tAllele1 = line[8] # Normal Allele tAllele2", "A string that you want to print out that is informative. :return: None", "print(\"WARNING: %s\" % '\\t'.join(line)) return(None) # Create INFO field INFO = \"MAF_Hugo_Symbol=\" +", "rsid.startswith(\"rs\") == False: if Options.verbose: print(\"ERROR: %s\"%(line)) sys.exit(\"ERROR: Problem in id column\") #", "a function. 
''' @wraps(function) def function_timer(*args, **kwargs): t0 = time.time() result = function(*args,", "file\") parser.add_option('-r', '--ref_genome', dest=\"refGenome\", default=\"/Users/schencro/Desktop/Bioinformatics_Tools/Ref_Genomes/Ensembl/GRCh37.75/GRCh37.75.fa\", help=\"Reference genome to be used for maf2vcf conversion.\")", "ref_reads = '.' alt_reads = '.' total_reads = '.' elif (i_t_vaf==\"\" or i_t_vaf==\"NA\")", "about. :param function: Function of interest. :return: A function to wrap around a", "import time import numpy as np def OptionParsing(): usage = 'usage: %prog -i", "+ line[3], check=False) # VCF reference is the preceding base in the insertion", "chrom + \":\" + refAnchorPos + \"-\" + line[3], check=False) # VCF reference", "'-'): with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') if Options.verbose: print(\"WARNING: %s\" % '\\t'.join(line))", "-k1,1 -k2,2n\\\"}' | gzip > %s\"%(vcfFile, vcfFileSorted)) os.system(\"rm %s\"%(vcfFile)) os.system(\"gzip %s\"%(errorFile)) def main():", "+ \":\" + refAnchorPos + \"-\" + line[3], check=False) # VCF reference is", "if Options.verbose: print(\"WARNING: Malformed MAF entry. %s\"%('\\t'.join(line))) print('') with open(errorFile, 'a') as errerOut:", "interest. :param ref: Reference sequence to compare to fetched sequence. :return: Fetched reference", "toContinue = True else: sys.exit(\"What the fuck\") i+=1 print('') return(toContinue) def processSNP(line, chrom,", "total_reads = '.' vaf = '.' else: sys.exit(\"ERROR: Problem processing INS %s\" %", "sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Create INFO field INFO =", "''.join([line.decode('utf-8').rstrip('\\n') for line in outInfo[1:]]) if check: if refSeq == ref: return(True) else:", "deletion as the alternative base and the variant pos vcfAlt = refSeq[0]+altAllele vcfPos", "MAF to a vcf4.2 file using python >=3.6. 
Created by <NAME> 8 March", "as errerOut: errerOut.write('\\t'.join(line)+'\\n') if Options.verbose: print(\"WARNING: %s\" % '\\t'.join(line)) return(None) # Create INFO", "Verifying maf file.\") if fileLength > 200: n=0.02 else: n=1. a = np.arange(fileLength)", "linetowrite = processSNP(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options) elif variantType==\"DEL\":", "SpotCheckProperReference(inFile, Options, n) with open(Options.maf,'r') as inFile: i = 0 for line in", "possible... if Options.verbose: print(\"WARNING: Malformed MAF entry. %s\"%('\\t'.join(line))) print('') with open(errorFile, 'a') as", "time.time() result = function(*args, **kwargs) t1 = time.time() print (\"INFO: Total time running", "for interesting but unresolved MAF line if (ref != tAllele1 and ref !=", "not to throw error if the provided reference matches :param refGenome: Reference Fasta", "[alt_reads, ialt_reads] if read != \"NA\"][0] total_reads = str(int(ref_reads) + int(alt_reads)) vaf =", "to convert a MAF to a vcf4.2 file using python >=3.6. Created by", "inFile: with open(Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf'), 'w') as outVCF: errorFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))", "\"0/1\" ref_reads = [read for read in [ref_reads, iref_reads] if read != \"NA\"][0]", "= line[42] if QUAL == 'None' or QUAL == 'NA' or QUAL ==", "dest=\"outDir\", default=None, help=\"Output directory for .vcf file\") parser.add_option('-r', '--ref_genome', dest=\"refGenome\", default=\"/Users/schencro/Desktop/Bioinformatics_Tools/Ref_Genomes/Ensembl/GRCh37.75/GRCh37.75.fa\", help=\"Reference genome", "the MAF file. 
elif ref_reads == 'NA' or alt_reads == 'NA' and reportedVAF", "Start_position'): count+=1 i += 1 else: i += 1 linetoWrite = CreateVCFLine(line, errorFile,", "vcfAlt, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut) def processINS(line, chrom, pos, rsid,", "fileLength > 200: n=0.02 else: n=1. a = np.arange(fileLength) np.random.shuffle(a) a = list(a[:int(fileLength*n)])", "= toPullIDs[13] count = 0 i = 0 with open(Options.maf, 'r') as inFile:", "file. Default=False\") parser.add_option('-v', '--verbose', dest='verbose', default=False, action='store_true', help=\"Use this flag to turn on", "UpdateProgress(i, n, DisplayText): ''' Prints a progress bar where appropriate. :param i: Current", "as outVCF: errorFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/')) - 1].replace('.maf', '.ignoredSNVs.maf') with open(errorFile, 'w')", "mutType, variantType, strand, errorFile, Options) elif variantType==\"DEL\": linetowrite = processDEL(line, chrom, pos, rsid,", "arguments for maf file, output directory, and reference genome (single fasta file).\") sys.exit()", "str(int(alt_reads)/float(total_reads)) elif i_t_vaf!=\"\" and i_t_vaf!=\"NA\" and ref_reads == 'NA' and iref_reads=='NA' and alt_reads", "Options :param fileLength: Length of the file being read :return: None ''' print(\"INFO:", "Position chrom, pos, id = line[1], line[2], line[10] # Get rs ID rsid", "\".:.,.:.:%s\"%(line[41]) # Final vcf line out lineOut = [chrom, pos, rsid, refAllele, altAllele,", "in the MAF file. vcfRef = refSeq # VCF has base directly preceding", "a file of more than 200 variants. :param mafFile: Input mafFile object (opened)", "in MAF vcfRef = refSeq[0] # VCF has base directly preceding the deletion", "ref_reads = '.' alt_reads = '.' total_reads = '.' elif ( i_t_vaf ==", "but I explicityly want to know as much of what I'm choosing to", "200 variants. 
:param mafFile: Input mafFile object (opened) :param Options: Parser Options :param", "SamtoolsFaidx(Options.refGenome, chrom + \":\" + refAnchorPos + \"-\" + line[3], check=False) if refSeq[1:]", "file. vcfRef = refSeq # VCF has base directly preceding the deletion as", "%s: %s minutes\" % (function.__name__, str(round((t1-t0)/60.,2))) ) return result return function_timer def UpdateProgressGetN(fileName):", "Allele Typically QUAL = line[42] if QUAL == 'None' or QUAL == 'NA'", ":param fileLength: Length of the file being read :return: None ''' print(\"INFO: Verifying", "Random sampling is employed to ensure proper reference is used. Will spot check", "by <NAME> 8 March 2018 ''' import os import sys from optparse import", "\";DCC_Project_Code=\" + \\ line[44] # Normal variant field if anything if line[41] ==", "Verifying maf file\") count+=1 line = line.rstrip('\\n').split('\\t') genomicPos = line[1] + \":\" +", "ioObject.write(\"##INFO=<ID=MAF_Hugo_Symbol,Number=1,Type=String,Description=\\\"HUGO Symbol in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_ref_context,Number=1,Type=String,Description=\\\"Reference context in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Genome_Change,Number=1,Type=String,Description=\\\"Genome", "== 'NA': ref_reads = '.' total_reads = alt_reads else: alt_reads = '.' total_reads", "processDEL(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options): ref = line[7] tAllele1", "Input mafFile object (opened) :param Options: Parser Options :param fileLength: Length of the", "function: Function of interest. :return: A function to wrap around a function. '''", "function(*args, **kwargs) t1 = time.time() print (\"INFO: Total time running %s: %s minutes\"", "than 200 variants. :param mafFile: Input mafFile object (opened) :param Options: Parser Options", "and alt_reads == 'NA' and ialt_reads=='NA': GT='./.' ref_reads='.' alt_reads='.' total_reads='.' vaf='.' 
else: sys.exit(\"ERROR:", "alt_reads else: alt_reads = '.' total_reads = ref_reads sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]),", "parser.add_option('-v', '--verbose', dest='verbose', default=False, action='store_true', help=\"Use this flag to turn on verbose mode.", "count+=1 line = line.rstrip('\\n').split('\\t') genomicPos = line[1] + \":\" + line[2] + \"-\"", "equal to 1 if ref_reads == 'NA': ref_reads = '.' total_reads = alt_reads", "UpdateProgressGetN(Options.maf) if Options.spotcheck: with open(Options.maf, 'r') as inFile: SpotCheckProperReference(inFile, Options, n) with open(Options.maf,'r')", "variantClass !=\"ONP\": toContinue = SamtoolsFaidx(Options.refGenome, genomicPos, ref) if count == len(a): print('') return(toContinue)", "GT=\"0/1\" ref_reads = [read for read in [ref_reads, iref_reads] if read != \"NA\"][0]", "= alt_reads else: alt_reads = '.' total_reads = ref_reads sampleField = ':'.join([GT, ','.join([ref_reads,", "'.ignoredSNVs.maf') with open(errorFile, 'w') as errorOut: errorOut.write(header) CreateHeader(outVCF, Options, tumorID, normalID) for line", "%s\"%(vcfFile)) os.system(\"gzip %s\"%(errorFile)) def main(): print(\"INFO: Processing MAF file.\") FilePath = os.path.dirname(os.path.abspath(__file__)) (Options,", "cmd = \"gzip -cd %s | wc -l\" % (fileName) pipe = subprocess.Popen(cmd,", "Improper reference. Found %s at %s. Reference genome shows %s' % (ref, genomicPos,", "ref: Reference sequence to compare to fetched sequence. :return: Fetched reference sequence. '''", "print([k for k in a]) # sys.exit(\"Problem here\") elif i != 0 and", "genomicPos: Genomic Position of interest. 
:param ref: Reference sequence to compare to fetched", "Sorting vcf file.\") vcfFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf') vcfFileSorted = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.head.maf','.sorted.vcf.gz')", "== 'NA' and reportedVAF == 'NA': with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') if", "return(toContinue) # else: # print(checkIt) # print(line) # print([k for k in a])", "tAllele1 = line[8] # Normal Allele tAllele2 = line[9] # Alt Allele QUAL", "== 'NA': vaf = i_t_vaf GT = \"./.\" ref_reads = '.' alt_reads =", "1 else: i += 1 linetoWrite = CreateVCFLine(line, errorFile, Options) if linetoWrite is", "[read for read in [ref_reads, iref_reads] if read != \"NA\"][0] alt_reads = [read", "'--spotCheckMaf', dest='spotcheck', default=False, action='store_true', help=\"Use this flag to verify reference matching to maf", "i += 1 linetoWrite = CreateVCFLine(line, errorFile, Options) if linetoWrite is not None:", "alt_reads == 'NA' and reportedVAF == '1': GT = \"1/1\" # Appears to", "to print out that is informative. :return: None ''' sys.stdout.write('\\r') j = (i", "1) / n sys.stdout.write(\"[%-20s] %d%%\\t INFO: %s\" % ('=' * int(20 * j),", "vcfAlt = refSeq[0]+altAllele vcfPos = refAnchorPos # Get read information iref_reads = line[37]", "MAF entry. 
%s\"%('\\t'.join(line))) print('') with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') # print(line) #", "inFile: if i == 1: toPullIDs = line.rstrip('\\n').split('\\t') break else: header = line", "= line[8] # Normal Allele tAllele2 = line[9] # Alt Allele QUAL =", "awk '$1 ~ /^#/ {print $0;next} {print $0 | \\\"LC_ALL=C sort -k1,1 -k2,2n\\\"}'", "(\"INFO: Total time running %s: %s minutes\" % (function.__name__, str(round((t1-t0)/60.,2))) ) return result", "np def OptionParsing(): usage = 'usage: %prog -i <*.maf> -o <directory> -r <ref.fa>'", "maf file.\") if fileLength > 200: n=0.02 else: n=1. a = np.arange(fileLength) np.random.shuffle(a)", "or QUAL == 'NA' or QUAL == '': QUAL = '.' if ref", "+ line[2] + \"-\" + line[3] ref = line[7] mutType = line[5] variantClass", "% ('\\t'.join(line))) sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Create INFO field", "datetime import time import numpy as np def OptionParsing(): usage = 'usage: %prog", "errorFile, Options) elif variantType==\"INS\": linetowrite = processINS(line, chrom, pos, rsid, mutType, variantType, strand,", "Use this as a wrapper at the top of any function you want", "reads for vaf==1 if (ref_reads != 'NA' or iref_reads != 'NA') and (alt_reads", "len([k for k in a if k==i]) # if checkIt==1: UpdateProgress(count, len(a), \"INFO:", "= refSeq[0] # VCF has base directly preceding the deletion as the alternative", "and the variant pos vcfAlt = refSeq[0]+altAllele vcfPos = refAnchorPos # Get read", "Code in original MAF file.\\\">\\n\") ioObject.write(\"##FORMAT=<ID=GT,Number=1,Type=String,Description=\\\"Genotype\\\">\\n\") ioObject.write(\"##FORMAT=<ID=AD,Number=2,Type=Integer,Description=\\\"Allelic depths of REF and ALT(s) in", "SpotCheckProperReference(mafFile, Options, fileLength): ''' Randomly samples the file to ensure proper reference file", "line[10] # Get rs ID rsid = line[10] if rsid == '': rsid", "| \\\"LC_ALL=C sort -k1,1 -k2,2n\\\"}' | 
gzip > %s\"%(vcfFile, vcfFileSorted)) os.system(\"rm %s\"%(vcfFile)) os.system(\"gzip", "VCF has base directly preceding the deletion as the alternative base and the", "= '.' alt_reads = '.' total_reads = '.' elif ( i_t_vaf == \"\"", "wrap around a function. ''' @wraps(function) def function_timer(*args, **kwargs): t0 = time.time() result", "in the insertion in MAF vcfRef = refSeq[0] # VCF has base directly", "UpdateProgress(count, len(a), \"INFO: Verifying maf file\") count+=1 line = line.rstrip('\\n').split('\\t') genomicPos = line[1]", "= [chrom, pos, rsid, refAllele, altAllele, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut)", "= [chrom, vcfPos, rsid, vcfRef, vcfAlt, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return", "and ref_reads == 'NA' and iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA': vaf=i_t_vaf", "line[3], check=False) # VCF reference is the preceding base in the insertion in", "file using python >=3.6. Created by <NAME> 8 March 2018 ''' import os", "QUAL == 'None' or QUAL == 'NA' or QUAL == '': QUAL =", "MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Genome_Change,Number=1,Type=String,Description=\\\"Genome change in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Type,Number=1,Type=String,Description=\\\"Variant type (SNP,INS,DEL) in original", "Please include arguments for maf file, output directory, and reference genome (single fasta", "# print(line) # print([k for k in a]) # sys.exit(\"Problem here\") elif i", "# Final vcf line out lineOut = [chrom, pos, rsid, refAllele, altAllele, QUAL,", "'.' total_reads = '.' elif (i_t_vaf==\"\" or i_t_vaf==\"NA\") and ref_reads == 'NA' and", "= str(int(pos) - 1) # Fetch the base that precedes the deletion. 
refSeq", "\"gzip -cd %s | wc -l\" % (fileName) pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout", "def processDEL(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options): ref = line[7]", "checkIt = len([k for k in a if k==i]) # if checkIt==1: UpdateProgress(count,", "\";MAF_Variant_Classification=\" + mutType +\";DCC_Project_Code=\" + line[44] # Normal variant field if anything if", "and (alt_reads != 'NA' or ialt_reads!='NA'): GT=\"0/1\" ref_reads = [read for read in", "'': QUAL = '.' if ref == tAllele1: altAllele = tAllele1 refAllele =", "ref_reads='.' alt_reads='.' total_reads='.' vaf='.' else: sys.exit(\"ERROR: Problem processing DEL %s\"%('\\t'.join(line))) sampleField = ':'.join([GT,", "sequence to compare to fetched sequence. :return: Fetched reference sequence. ''' proc =", "header = line i+=1 tumorID = toPullIDs[12] normalID = toPullIDs[13] count = 0", "rs ID rsid = line[10] if rsid == '': rsid = '.' elif", "to be used for maf2vcf conversion.\") parser.add_option('-s', '--spotCheckMaf', dest='spotcheck', default=False, action='store_true', help=\"Use this", "= '.' if ref == tAllele1: altAllele = tAllele1 refAllele = tAllele2 else:", "if fileLength > 200: n=0.02 else: n=1. 
a = np.arange(fileLength) np.random.shuffle(a) a =", "a wrapper at the top of any function you want to get run", "errerOut: errerOut.write('\\t'.join(line)+'\\n') # print(line) # sys.exit(\"ERROR: Malformed MAF entry.\") return(linetowrite) def CreateHeader(ioObject, Options,", "line[41] == \"NA\": normalGenotype = \".:.,.:.:.\" else: normalGenotype = \".:.,.:.:%s\" % (line[41]) lineOut", "options.refGenome is None: print(\"ERROR: Please include arguments for maf file, output directory, and", "# Last check for interesting but unresolved MAF line if (ref != tAllele1", "Parser Options :param fileLength: Length of the file being read :return: None '''", "in [ref_reads, iref_reads] if read != \"NA\"][0] alt_reads = [read for read in", "+ Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf') vcfFileSorted = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.head.maf','.sorted.vcf.gz') os.system(\"cat %s | awk '$1 ~", ".vcf file\") parser.add_option('-r', '--ref_genome', dest=\"refGenome\", default=\"/Users/schencro/Desktop/Bioinformatics_Tools/Ref_Genomes/Ensembl/GRCh37.75/GRCh37.75.fa\", help=\"Reference genome to be used for maf2vcf", "determine reads for vaf==1 if (ref_reads != 'NA' or iref_reads != 'NA') and", "i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False: # checkIt = len([k", "ialt_reads] if read != \"NA\"][0] total_reads = str(int(ref_reads) + int(alt_reads)) vaf = str(int(alt_reads)/float(total_reads))", "(alt_reads != 'NA' or ialt_reads != 'NA'): GT = \"0/1\" ref_reads = [read", "'NA' or iref_reads != 'NA') and (alt_reads != 'NA' or ialt_reads != 'NA'):", "Script to convert a MAF to a vcf4.2 file using python >=3.6. Created", "errerOut: errerOut.write('\\t'.join(line)+'\\n') if Options.verbose: print(\"WARNING: %s\" % '\\t'.join(line)) return(None) # Create INFO field", "4)) if vaf != '1.' 
and strand==\"+\" or strand==\"-\": GT=\"0|1\" else: GT=\"0/1\" sampleField", "ioObject.write(\"##fileDate=%s\\n\"%(now.date())) ioObject.write(\"##source=maf2vcf.py\\n\") ioObject.write(\"##reference=%s\\n\"%(Options.refGenome)) ioObject.write(\"##sampleColumns=Normal.Tumor\\n\") ioObject.write(\"##INFO=<ID=MAF_Hugo_Symbol,Number=1,Type=String,Description=\\\"HUGO Symbol in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_ref_context,Number=1,Type=String,Description=\\\"Reference context in", "= line[37] ialt_reads = line[36] ref_reads = line[39] alt_reads = line[38] reportedVAF =", "reads for vaf==1 if (ref_reads != 'NA' or iref_reads!='NA') and (alt_reads != 'NA'", "sys.exit(\"Problem here\") elif i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False: print(\"\")", "def OptionParsing(): usage = 'usage: %prog -i <*.maf> -o <directory> -r <ref.fa>' parser", "Allele tAllele2 = line[9] # Alt Allele QUAL = line[42] if QUAL ==", "ioObject.write(\"##source=maf2vcf.py\\n\") ioObject.write(\"##reference=%s\\n\"%(Options.refGenome)) ioObject.write(\"##sampleColumns=Normal.Tumor\\n\") ioObject.write(\"##INFO=<ID=MAF_Hugo_Symbol,Number=1,Type=String,Description=\\\"HUGO Symbol in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_ref_context,Number=1,Type=String,Description=\\\"Reference context in original", "in inFile: if i == 1: toPullIDs = line.rstrip('\\n').split('\\t') break else: header =", "return(True) else: print('ERROR: May not be proper reference genome') print('ERROR: Improper reference. Found", "where appropriate. :param i: Current Step :param n: Total number of steps. :param", "is SNP, INS, DEL, etc.) 
mutType = line[5] variantType = line[6] # Create", "variantType==\"DEL\": linetowrite = processDEL(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options) elif", "# Get rs ID rsid = line[10] if rsid == '': rsid =", "(alt_reads != 'NA' or ialt_reads!='NA'): GT=\"0/1\" ref_reads = [read for read in [ref_reads,", "count+=1 i += 1 else: i += 1 linetoWrite = CreateVCFLine(line, errorFile, Options)", "= '.' total_reads = ref_reads sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) #", "= tAllele1 refAllele = tAllele2 else: altAllele = tAllele2 refAllele = tAllele1 #", "normal?) vaf = reportedVAF # Sets VAF equal to 1 if ref_reads ==", "+ Options.maf.split('/')[len(Options.maf.split('/')) - 1].replace('.maf', '.ignoredSNVs.maf') with open(errorFile, 'w') as errorOut: errorOut.write(header) CreateHeader(outVCF, Options,", "with open(Options.maf, 'r') as inFile: with open(Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf'), 'w') as outVCF: errorFile", "\"-\" + line[3], check=False) if refSeq[1:] != altAllele: print(\"ERROR: Deletion alternative allele does", "and i_t_vaf != \"NA\" and ref_reads == 'NA' and iref_reads == 'NA' and", "SamtoolsFaidx(Options.refGenome, chrom + \":\" + refAnchorPos + \"-\" + line[3], check=False) # VCF", "else: return(refSeq) def SpotCheckProperReference(mafFile, Options, fileLength): ''' Randomly samples the file to ensure", "Normal Allele Typically tAllele2 = line[9] # Alt Allele Typically QUAL = line[42]", "if tAllele1 == '-': altAllele = tAllele2 else: altAllele = tAllele1 # Obtain", "errorFile, Options) elif variantType==\"DEL\": linetowrite = processDEL(line, chrom, pos, rsid, mutType, variantType, strand,", "linetowrite = None if variantType==\"SNP\": linetowrite = processSNP(line, chrom, pos, rsid, mutType, variantType,", "str(int(ref_reads) + int(alt_reads)) vaf = repr(round(int(alt_reads) / float(total_reads), 4)) if vaf != '1.'", "!= \"NA\"][0] 
total_reads = str(int(ref_reads) + int(alt_reads)) vaf = str(int(alt_reads)/float(total_reads)) elif i_t_vaf!=\"\" and", "normalGenotype, sampleField] return(lineOut) def processDEL(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options):", "if read != \"NA\"][0] total_reads = str(int(ref_reads) + int(alt_reads)) vaf = str(int(alt_reads)/float(total_reads)) elif", "ialt_reads=='NA': GT='./.' ref_reads='.' alt_reads='.' total_reads='.' vaf='.' else: sys.exit(\"ERROR: Problem processing DEL %s\"%('\\t'.join(line))) sampleField", "string that you want to print out that is informative. :return: None '''", "help=\"Output directory for .vcf file\") parser.add_option('-r', '--ref_genome', dest=\"refGenome\", default=\"/Users/schencro/Desktop/Bioinformatics_Tools/Ref_Genomes/Ensembl/GRCh37.75/GRCh37.75.fa\", help=\"Reference genome to be", "= SamtoolsFaidx(Options.refGenome, chrom + \":\" + refAnchorPos + \"-\" + line[3], check=False) if", "print(\"ERROR: Deletion alternative allele does not match reference sequence. %s\" % ('\\t'.join(line))) sys.exit()", "CreateVCFLine(line, errorFile, Options): line = line.rstrip('\\n').split('\\t') # Genomic Position chrom, pos, id =", "vcfFileSorted)) os.system(\"rm %s\"%(vcfFile)) os.system(\"gzip %s\"%(errorFile)) def main(): print(\"INFO: Processing MAF file.\") FilePath =", "k in a]) # sys.exit(\"Problem here\") elif i != 0 and line.startswith('Hugo_Symbol Chromosome", "tAllele2 else: altAllele = tAllele2 refAllele = tAllele1 ref_reads = line[39] alt_reads =", "entry. %s\"%('\\t'.join(line))) print('') with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') # print(line) # sys.exit(\"ERROR:", "subprocess.Popen(['samtools','faidx',refGenome, genomicPos], stdout=subprocess.PIPE) proc.wait() outInfo = proc.stdout.readlines() refSeq = ''.join([line.decode('utf-8').rstrip('\\n') for line in", "sequence. 
''' proc = subprocess.Popen(['samtools','faidx',refGenome, genomicPos], stdout=subprocess.PIPE) proc.wait() outInfo = proc.stdout.readlines() refSeq =", "'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') if Options.verbose: print(\"WARNING: %s\" % '\\t'.join(line)) return(None) # Create", "deletion. refSeq = SamtoolsFaidx(Options.refGenome, chrom + \":\" + refAnchorPos + \"-\" + line[3],", "information if mutType == '': mutType = '.' if variantType == '': variantType", "Options, n) with open(Options.maf,'r') as inFile: i = 0 for line in inFile:", "= '.' vaf = '.' else: sys.exit(\"ERROR: Problem processing INS %s\" % ('\\t'.join(line)))", "# Get phasing information and determine reads for vaf==1 if (ref_reads != 'NA'", "phasing information and determine reads for vaf==1 if ref_reads == 'NA' or alt_reads", "+ mutType + \";DCC_Project_Code=\" + \\ line[44] # Normal variant field if anything", "result = function(*args, **kwargs) t1 = time.time() print (\"INFO: Total time running %s:", "total_reads = '.' elif ( i_t_vaf == \"\" or i_t_vaf == \"NA\") and", "file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Type,Number=1,Type=String,Description=\\\"Variant type (SNP,INS,DEL) in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Classification,Number=1,Type=String,Description=\\\"Variant Classification (if SNP) in", "2018 ''' import os import sys from optparse import OptionParser import subprocess from", "check=False) if refSeq[1:] != altAllele: print(\"ERROR: Deletion alternative allele does not match reference", "if refSeq[1:] != altAllele: print(\"ERROR: Deletion alternative allele does not match reference sequence.", "= line[5] variantType = line[6] # Create proper vcf formatted information if mutType", "'NA' and ialt_reads=='NA': vaf=i_t_vaf GT=\"./.\" ref_reads = '.' alt_reads = '.' 
total_reads =", "# Strand Information strand = line[4] # Variant Classification/Type (Type is SNP, INS,", "line[36] ref_reads = line[39] alt_reads = line[38] reportedVAF = line[28] i_t_vaf = line[43]", "or QUAL == '': QUAL = '.' if tAllele1 == '-': altAllele =", "elif rsid.startswith(\"rs\") == False: if Options.verbose: print(\"ERROR: %s\"%(line)) sys.exit(\"ERROR: Problem in id column\")", "ioObject.write(\"##INFO=<ID=MAF_ref_context,Number=1,Type=String,Description=\\\"Reference context in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Genome_Change,Number=1,Type=String,Description=\\\"Genome change in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Type,Number=1,Type=String,Description=\\\"Variant", "linetoWrite = CreateVCFLine(line, errorFile, Options) if linetoWrite is not None: outVCF.write('\\t'.join(linetoWrite)+'\\n') print('') print(\"INFO:", "!= altAllele: print(\"ERROR: Deletion alternative allele does not match reference sequence. %s\" %", "Fetch the base that precedes the deletion. 
refSeq = SamtoolsFaidx(Options.refGenome, chrom + \":\"", "== len(a): print('') return(toContinue) # else: # print(checkIt) # print(line) # print([k for", "INFO = \"MAF_Hugo_Symbol=\" + line[0] + \";MAF_ref_context=\" + line[15].upper() + \";MAF_Genome_Change=\" + line[14]", "np.arange(fileLength) np.random.shuffle(a) a = list(a[:int(fileLength*n)]) i = 0 count = 0 for line", "if Options.verbose: print(\"WARNING: %s\" % '\\t'.join(line)) return(None) # Simple SNV cases else: total_reads", "= time.time() result = function(*args, **kwargs) t1 = time.time() print (\"INFO: Total time", "or iref_reads!='NA') and (alt_reads != 'NA' or ialt_reads!='NA'): GT=\"0/1\" ref_reads = [read for", "+ Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.head.maf','.sorted.vcf.gz') os.system(\"cat %s | awk '$1 ~ /^#/ {print $0;next} {print $0", "Information strand = line[4] # Variant Classification/Type (Type is SNP, INS, DEL, etc.)", "'w') as outVCF: errorFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/')) - 1].replace('.maf', '.ignoredSNVs.maf') with open(errorFile,", "employed to ensure proper reference is used. Will spot check 2% of a", "for vaf==1 if (ref_reads != 'NA' or iref_reads != 'NA') and (alt_reads !=", "MAF file.\\\">\\n\") ioObject.write(\"##FORMAT=<ID=GT,Number=1,Type=String,Description=\\\"Genotype\\\">\\n\") ioObject.write(\"##FORMAT=<ID=AD,Number=2,Type=Integer,Description=\\\"Allelic depths of REF and ALT(s) in the order listed\\\">\\n\")", "GT=\"./.\" ref_reads = '.' alt_reads = '.' total_reads = '.' elif (i_t_vaf==\"\" or", "t1 = time.time() print (\"INFO: Total time running %s: %s minutes\" % (function.__name__,", "alt_reads == 'NA' and ialt_reads == 'NA': GT = './.' ref_reads = '.'", "if count == len(a): print('') return(toContinue) # else: # print(checkIt) # print(line) #", "reported deletion in the MAF file. vcfRef = refSeq # VCF has base", "Fasta file :param genomicPos: Genomic Position of interest. 
:param ref: Reference sequence to", "Strand Information strand = line[4] # Variant Classification/Type (Type is SNP, INS, DEL,", "MAF file.\") FilePath = os.path.dirname(os.path.abspath(__file__)) (Options, Parser) = OptionParsing() ProcessFile(Options) if __name__==\"__main__\": main()", "site\\\">\\n\") ioObject.write(\"##FORMAT=<ID=VF,Number=1,Type=Float,Description=\\\"Variant Allele Frequency.\\\">\\n\") ioObject.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\t%s\\t%s\\n\"%(normalID,tumorID)) @fn_timer def ProcessFile(Options): n = UpdateProgressGetN(Options.maf) if Options.spotcheck:", "\";MAF_Variant_Type=\" + variantType + \";MAF_Variant_Classification=\" + mutType + \";DCC_Project_Code=\" + \\ line[44] #", "j, DisplayText)) sys.stdout.flush() def SamtoolsFaidx(refGenome, genomicPos, ref='', check=True): ''' Obtain reference sequence and", "processINS(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options): ref = line[7] tAllele1", "pos vcfAlt=refSeq[0] vcfPos=refAnchorPos # Get read information iref_reads = line[37] ialt_reads = line[36]", "+ \";MAF_Variant_Classification=\" + mutType + \";DCC_Project_Code=\" + \\ line[44] # Normal variant field", "strand==\"+\" or strand==\"-\": GT=\"0|1\" else: GT=\"0/1\" sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf])", "GT = './.' ref_reads = '.' alt_reads = '.' total_reads = '.' vaf", "alt_reads == 'NA' and ialt_reads=='NA': GT='./.' ref_reads='.' alt_reads='.' total_reads='.' vaf='.' else: sys.exit(\"ERROR: Problem", "= OptionParser(usage) parser.add_option('-i', '--input_maf', dest=\"maf\", default=None, help=\".maf file to be converted.\") parser.add_option('-o', '--output_dir',", "QUAL == '': QUAL = '.' if ref == tAllele1: altAllele = tAllele1", "alt_reads == 'NA' and ialt_reads=='NA': vaf=i_t_vaf GT=\"./.\" ref_reads = '.' alt_reads = '.'", "line[44] # Normal variant field if anything if line[41] == \"NA\": normalGenotype =", "!= '1.' 
and strand==\"+\" or strand==\"-\": GT=\"0|1\" else: GT=\"0/1\" sampleField = ':'.join([GT, ','.join([ref_reads,", "output directory, and reference genome (single fasta file).\") sys.exit() else: pass return (options,", "field if anything if line[41]==\"NA\": normalGenotype = \".:.,.:.:.\" else: normalGenotype = \".:.,.:.:%s\"%(line[41]) #", "processing DEL %s\"%('\\t'.join(line))) sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Create INFO", "altAllele = tAllele2 refAllele = tAllele1 ref_reads = line[39] alt_reads = line[38] reportedVAF", "i_t_vaf!=\"\" and i_t_vaf!=\"NA\" and ref_reads == 'NA' and iref_reads=='NA' and alt_reads == 'NA'", "VCF reference is the preceding base in the insertion in MAF vcfRef =", "sys.exit() return(None) else: return(refSeq) def SpotCheckProperReference(mafFile, Options, fileLength): ''' Randomly samples the file", "ref = line[7] tAllele1 = line[8] # Normal Allele tAllele2 = line[9] #", "\\\"LC_ALL=C sort -k1,1 -k2,2n\\\"}' | gzip > %s\"%(vcfFile, vcfFileSorted)) os.system(\"rm %s\"%(vcfFile)) os.system(\"gzip %s\"%(errorFile))", "Malformed MAF entry.\") return(linetowrite) def CreateHeader(ioObject, Options, tumorID, normalID): now = datetime.datetime.now() ioObject.write(\"##fileformat=VCFv4.2\\n\")", "ioObject.write(\"##FORMAT=<ID=GT,Number=1,Type=String,Description=\\\"Genotype\\\">\\n\") ioObject.write(\"##FORMAT=<ID=AD,Number=2,Type=Integer,Description=\\\"Allelic depths of REF and ALT(s) in the order listed\\\">\\n\") ioObject.write(\"##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\\\"Total read", "INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut) def processDEL(line, chrom, pos, rsid, mutType, variantType, strand,", "%s\"%(line)) sys.exit(\"ERROR: Problem in id column\") # Strand Information strand = line[4] #", "Symbol in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_ref_context,Number=1,Type=String,Description=\\\"Reference context in original MAF 
file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Genome_Change,Number=1,Type=String,Description=\\\"Genome change", "refSeq # VCF has base directly preceding the deletion as the alternative base", "top of any function you want to get run time information about. :param", "= '.' alt_reads = '.' total_reads = '.' vaf = '.' else: sys.exit(\"ERROR:", "!= \"TNP\" and variantClass !=\"ONP\": toContinue = SamtoolsFaidx(Options.refGenome, genomicPos, ref) if count ==", "tAllele1 == '-': altAllele = tAllele2 else: altAllele = tAllele1 # Obtain the", "return(lineOut) def processDEL(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options): ref =", "the DEL refAnchorPos = str(int(pos)-1) # Fetch the base that precedes the deletion.", "= subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout else: cmd = \"wc -l %s\" % (fileName) pipe", "def processSNP(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options): ref = line[7]", "normalID): now = datetime.datetime.now() ioObject.write(\"##fileformat=VCFv4.2\\n\") ioObject.write(\"##fileDate=%s\\n\"%(now.date())) ioObject.write(\"##source=maf2vcf.py\\n\") ioObject.write(\"##reference=%s\\n\"%(Options.refGenome)) ioObject.write(\"##sampleColumns=Normal.Tumor\\n\") ioObject.write(\"##INFO=<ID=MAF_Hugo_Symbol,Number=1,Type=String,Description=\\\"HUGO Symbol in original", "if refSeq == ref: return(True) else: print('ERROR: May not be proper reference genome')", "== 'NA' and ialt_reads=='NA': GT='./.' ref_reads='.' alt_reads='.' total_reads='.' vaf='.' 
else: sys.exit(\"ERROR: Problem processing", "be converted.\") parser.add_option('-o', '--output_dir', dest=\"outDir\", default=None, help=\"Output directory for .vcf file\") parser.add_option('-r', '--ref_genome',", "else: normalGenotype = \".:.,.:.:%s\" % (line[41]) lineOut = [chrom, vcfPos, rsid, vcfRef, vcfAlt,", "(i + 1) / n sys.stdout.write(\"[%-20s] %d%%\\t INFO: %s\" % ('=' * int(20", "variantType, strand, errorFile, Options) elif variantType==\"TNP\" or variantType==\"ONP\": with open(errorFile, 'a') as errerOut:", "for alternative allele (germline unlikely since it is called w.r.t normal?) vaf =", "else: pass return (options, parser) def fn_timer(function): ''' Use this as a wrapper", "QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut) def processDEL(line, chrom, pos, rsid, mutType,", "return(refSeq) def SpotCheckProperReference(mafFile, Options, fileLength): ''' Randomly samples the file to ensure proper", "interesting but unresolved MAF line if (ref != tAllele1 and ref != tAllele2)", "None or options.outDir is None or options.refGenome is None: print(\"ERROR: Please include arguments", "% (function.__name__, str(round((t1-t0)/60.,2))) ) return result return function_timer def UpdateProgressGetN(fileName): if fileName[len(fileName)-1]==\"z\": cmd", "0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False: # checkIt = len([k for k", "in original MAF file.\\\">\\n\") ioObject.write(\"##FORMAT=<ID=GT,Number=1,Type=String,Description=\\\"Genotype\\\">\\n\") ioObject.write(\"##FORMAT=<ID=AD,Number=2,Type=Integer,Description=\\\"Allelic depths of REF and ALT(s) in the", "directly preceding the deletion as the alternative base and the variant pos vcfAlt=refSeq[0]", "= processINS(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options) elif variantType==\"TNP\" or", "rsid, mutType, variantType, strand, errorFile, Options) elif variantType==\"DEL\": linetowrite = processDEL(line, chrom, pos,", 
"parser.add_option('-o', '--output_dir', dest=\"outDir\", default=None, help=\"Output directory for .vcf file\") parser.add_option('-r', '--ref_genome', dest=\"refGenome\", default=\"/Users/schencro/Desktop/Bioinformatics_Tools/Ref_Genomes/Ensembl/GRCh37.75/GRCh37.75.fa\",", "listed\\\">\\n\") ioObject.write(\"##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\\\"Total read depth across this site\\\">\\n\") ioObject.write(\"##FORMAT=<ID=VF,Number=1,Type=Float,Description=\\\"Variant Allele Frequency.\\\">\\n\") ioObject.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\t%s\\t%s\\n\"%(normalID,tumorID)) @fn_timer def", "sys.stdout.write('\\r') j = (i + 1) / n sys.stdout.write(\"[%-20s] %d%%\\t INFO: %s\" %", "as inFile: i = 0 for line in inFile: if i == 1:", "variantType, strand, errorFile, Options) elif variantType==\"DEL\": linetowrite = processDEL(line, chrom, pos, rsid, mutType,", "= SamtoolsFaidx(Options.refGenome, chrom + \":\" + refAnchorPos + \"-\" + line[3], check=False) #", "variant to continue processing. 
linetowrite = None if variantType==\"SNP\": linetowrite = processSNP(line, chrom,", "Maf File\") if line.startswith('Hugo_Symbol Chromosome Start_position'): count+=1 i += 1 else: i +=", "as errerOut: errerOut.write('\\t'.join(line)+'\\n') else: # This may seem duplicitious, but I explicityly want", "None or options.refGenome is None: print(\"ERROR: Please include arguments for maf file, output", "!= \"NA\"][0] alt_reads = [read for read in [alt_reads, ialt_reads] if read !=", "line[8] # Normal Allele Typically tAllele2 = line[9] # Alt Allele Typically QUAL", "fasta file).\") sys.exit() else: pass return (options, parser) def fn_timer(function): ''' Use this", "inFile: SpotCheckProperReference(inFile, Options, n) with open(Options.maf,'r') as inFile: i = 0 for line", "preceding the deletion as the alternative base and the variant pos vcfAlt=refSeq[0] vcfPos=refAnchorPos", "SNV cases else: total_reads = str(int(ref_reads) + int(alt_reads)) vaf = repr(round(int(alt_reads) / float(total_reads),", "+ line[15].upper() + \";MAF_Genome_Change=\" + line[ 14] + \";MAF_Variant_Type=\" + variantType + \";MAF_Variant_Classification=\"", "variantType = '.' # Determine type of variant to continue processing. linetowrite =", "variantType==\"TNP\" or variantType==\"ONP\": with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') else: # This may", "0 for line in inFile: if i == 1: toPullIDs = line.rstrip('\\n').split('\\t') break", "i_t_vaf GT = \"./.\" ref_reads = '.' alt_reads = '.' 
total_reads = '.'", "\"TNP\" and variantClass !=\"ONP\": toContinue = SamtoolsFaidx(Options.refGenome, genomicPos, ref) if count == len(a):", "variantClass = line[6] if variantClass != \"INS\" and variantClass != \"TNP\" and variantClass", "refAnchorPos + \"-\" + line[3], check=False) if refSeq[1:] != altAllele: print(\"ERROR: Deletion alternative", "in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Type,Number=1,Type=String,Description=\\\"Variant type (SNP,INS,DEL) in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Classification,Number=1,Type=String,Description=\\\"Variant Classification", "inFile: UpdateProgress(i, n, \"Processing Maf File\") if line.startswith('Hugo_Symbol Chromosome Start_position'): count+=1 i +=", "normalGenotype = \".:.,.:.:%s\"%(line[41]) # Final vcf line out lineOut = [chrom, pos, rsid,", "= 0 i = 0 with open(Options.maf, 'r') as inFile: with open(Options.outDir +", "parser.parse_args() if options.maf is None or options.outDir is None or options.refGenome is None:", "'NA') and (alt_reads != 'NA' or ialt_reads != 'NA'): GT = \"0/1\" ref_reads", "File\") if line.startswith('Hugo_Symbol Chromosome Start_position'): count+=1 i += 1 else: i += 1", "= '.' else: sys.exit(\"ERROR: Problem processing INS %s\" % ('\\t'.join(line))) sampleField = ':'.join([GT,", "%s\"%('\\t'.join(line))) sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Create INFO field INFO", "$0 | \\\"LC_ALL=C sort -k1,1 -k2,2n\\\"}' | gzip > %s\"%(vcfFile, vcfFileSorted)) os.system(\"rm %s\"%(vcfFile))", "iref_reads = line[37] ialt_reads = line[36] ref_reads = line[39] alt_reads = line[38] reportedVAF", "check=False) # VCF reference is the preceding base in the insertion in MAF", "reference sequence. 
%s\" % ('\\t'.join(line))) sys.exit() # VCF reference is the preceding base", "= line.rstrip('\\n').split('\\t') # Genomic Position chrom, pos, id = line[1], line[2], line[10] #", "ID rsid = line[10] if rsid == '': rsid = '.' elif rsid.startswith(\"rs\")", "read != \"NA\"][0] total_reads = str(int(ref_reads) + int(alt_reads)) vaf = str(int(alt_reads)/float(total_reads)) elif i_t_vaf!=\"\"", "refAllele = tAllele1 # Obtain the reference sequence + 1 preceding base for", "and reportedVAF == 'NA': with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') if Options.verbose: print(\"WARNING:", "and reference genome (single fasta file).\") sys.exit() else: pass return (options, parser) def", "line[7] mutType = line[5] variantClass = line[6] if variantClass != \"INS\" and variantClass", "0 for line in mafFile: if i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position')", "import wraps import datetime import time import numpy as np def OptionParsing(): usage", "= tAllele1 ref_reads = line[39] alt_reads = line[38] reportedVAF = line[28] # Get", "elif ( i_t_vaf == \"\" or i_t_vaf == \"NA\") and ref_reads == 'NA'", "flag to verify reference matching to maf file. Default=False\") parser.add_option('-v', '--verbose', dest='verbose', default=False,", "if the provided reference matches :param refGenome: Reference Fasta file :param genomicPos: Genomic", "'NA': ref_reads = '.' total_reads = alt_reads else: alt_reads = '.' total_reads =", "function you want to get run time information about. :param function: Function of", "Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf') vcfFileSorted = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.head.maf','.sorted.vcf.gz') os.system(\"cat %s | awk '$1", "print('') return(toContinue) # else: # print(checkIt) # print(line) # print([k for k in", "these very strange mutations within the MAF file. 
elif ref_reads == 'NA' or", "sys.exit(\"ERROR: Malformed MAF entry.\") return(linetowrite) def CreateHeader(ioObject, Options, tumorID, normalID): now = datetime.datetime.now()", "line[14] + \";MAF_Variant_Type=\" + variantType + \";MAF_Variant_Classification=\" + mutType +\";DCC_Project_Code=\" + line[44] #", "print(\"INFO: Verifying maf file.\") if fileLength > 200: n=0.02 else: n=1. a =", "= reportedVAF # Sets VAF equal to 1 if ref_reads == 'NA': ref_reads", "ensure proper reference file is used. Random sampling is employed to ensure proper", "of a file of more than 200 variants. :param mafFile: Input mafFile object", "else: i += 1 linetoWrite = CreateVCFLine(line, errorFile, Options) if linetoWrite is not", "strand, errorFile, Options) elif variantType==\"INS\": linetowrite = processINS(line, chrom, pos, rsid, mutType, variantType,", "= line[4] # Variant Classification/Type (Type is SNP, INS, DEL, etc.) mutType =", "Allele Frequency.\\\">\\n\") ioObject.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\t%s\\t%s\\n\"%(normalID,tumorID)) @fn_timer def ProcessFile(Options): n = UpdateProgressGetN(Options.maf) if Options.spotcheck: with open(Options.maf,", "genomicPos = line[1] + \":\" + line[2] + \"-\" + line[3] ref =", "# Normal variant field if anything if line[41] == \"NA\": normalGenotype = \".:.,.:.:.\"", "proper reference file is used. Random sampling is employed to ensure proper reference", "you want to get run time information about. :param function: Function of interest.", "if vaf != '1.' 
and strand==\"+\" or strand==\"-\": GT=\"0|1\" else: GT=\"0/1\" sampleField =", "fuck\") i+=1 print('') return(toContinue) def processSNP(line, chrom, pos, rsid, mutType, variantType, strand, errorFile,", "'NA' or ialt_reads!='NA'): GT=\"0/1\" ref_reads = [read for read in [ref_reads, iref_reads] if", "insertion in MAF vcfRef = refSeq[0] # VCF has base directly preceding the", "strand, errorFile, Options): ref = line[7] tAllele1 = line[8] # Normal Allele Typically", "= './.' ref_reads = '.' alt_reads = '.' total_reads = '.' vaf =", "and (alt_reads != 'NA' or ialt_reads != 'NA'): GT = \"0/1\" ref_reads =", "'NA' and iref_reads == 'NA' and alt_reads == 'NA' and ialt_reads == 'NA':", "reportedVAF == '1': GT = \"1/1\" # Appears to be homozygous for alternative", "reference matches :param refGenome: Reference Fasta file :param genomicPos: Genomic Position of interest.", "conversion.\") parser.add_option('-s', '--spotCheckMaf', dest='spotcheck', default=False, action='store_true', help=\"Use this flag to verify reference matching", "allele (germline unlikely since it is called w.r.t normal?) 
vaf = reportedVAF #", "vaf = str(int(alt_reads) / float(total_reads)) elif i_t_vaf != \"\" and i_t_vaf != \"NA\"", "line.startswith('Hugo_Symbol Chromosome Start_position'): count+=1 i += 1 else: i += 1 linetoWrite =", "cases else: total_reads = str(int(ref_reads) + int(alt_reads)) vaf = repr(round(int(alt_reads) / float(total_reads), 4))", "strand, errorFile, Options) elif variantType==\"TNP\" or variantType==\"ONP\": with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n')", "+ line[0] + \";MAF_ref_context=\" + line[15].upper() + \";MAF_Genome_Change=\" + line[ 14] + \";MAF_Variant_Type=\"", "'r') as inFile: SpotCheckProperReference(inFile, Options, n) with open(Options.maf,'r') as inFile: i = 0", "i_t_vaf == \"\" or i_t_vaf == \"NA\") and ref_reads == 'NA' and iref_reads", "chrom, pos, rsid, mutType, variantType, strand, errorFile, Options) elif variantType==\"TNP\" or variantType==\"ONP\": with", "altAllele = tAllele2 refAllele = tAllele1 # Obtain the reference sequence + 1", "A function to wrap around a function. ''' @wraps(function) def function_timer(*args, **kwargs): t0", "MAF entry.\") return(linetowrite) def CreateHeader(ioObject, Options, tumorID, normalID): now = datetime.datetime.now() ioObject.write(\"##fileformat=VCFv4.2\\n\") ioObject.write(\"##fileDate=%s\\n\"%(now.date()))", "+ 1 preceding base for the DEL refAnchorPos = str(int(pos)-1) # Fetch the", "genome (single fasta file).\") sys.exit() else: pass return (options, parser) def fn_timer(function): '''", "DEL, etc.) 
mutType = line[5] variantType = line[6] # Create proper vcf formatted", "MAF line if (ref != tAllele1 and ref != tAllele2) or (strand !=", "line[15].upper() + \";MAF_Genome_Change=\" + line[ 14] + \";MAF_Variant_Type=\" + variantType + \";MAF_Variant_Classification=\" +", "else: # print(checkIt) # print(line) # print([k for k in a]) # sys.exit(\"Problem", "anything if line[41]==\"NA\": normalGenotype = \".:.,.:.:.\" else: normalGenotype = \".:.,.:.:%s\"%(line[41]) # Final vcf", "reportedVAF = line[28] # Get phasing information and determine reads for vaf==1 if", "fileLength: Length of the file being read :return: None ''' print(\"INFO: Verifying maf", "original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_ref_context,Number=1,Type=String,Description=\\\"Reference context in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Genome_Change,Number=1,Type=String,Description=\\\"Genome change in original", "== 'NA' or alt_reads == 'NA' and reportedVAF == 'NA': with open(errorFile, 'a')", "k in a if k==i]) # if checkIt==1: UpdateProgress(count, len(a), \"INFO: Verifying maf", "= str(int(pos)-1) # Fetch the base that precedes the deletion. refSeq = SamtoolsFaidx(Options.refGenome,", "Variant Classification/Type (Type is SNP, INS, DEL, etc.) 
mutType = line[5] variantType =", "dest='spotcheck', default=False, action='store_true', help=\"Use this flag to verify reference matching to maf file.", "sys.exit() # VCF reference is the preceding base plus the reported deletion in", "[chrom, vcfPos, rsid, vcfRef, vcfAlt, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return (lineOut)", "= line[28] # Get phasing information and determine reads for vaf==1 if ref_reads", "and variantClass != \"TNP\" and variantClass !=\"ONP\": toContinue = SamtoolsFaidx(Options.refGenome, genomicPos, ref) if", "== '-': altAllele = tAllele2 else: altAllele = tAllele1 # Obtain the reference", "phasing information and determine reads for vaf==1 if (ref_reads != 'NA' or iref_reads", "maf file. Default=False\") parser.add_option('-v', '--verbose', dest='verbose', default=False, action='store_true', help=\"Use this flag to turn", "base that precedes the deletion. refSeq = SamtoolsFaidx(Options.refGenome, chrom + \":\" + refAnchorPos", "break else: header = line i+=1 tumorID = toPullIDs[12] normalID = toPullIDs[13] count", "this site\\\">\\n\") ioObject.write(\"##FORMAT=<ID=VF,Number=1,Type=Float,Description=\\\"Variant Allele Frequency.\\\">\\n\") ioObject.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\t%s\\t%s\\n\"%(normalID,tumorID)) @fn_timer def ProcessFile(Options): n = UpdateProgressGetN(Options.maf) if", "% (fileName) pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout else: cmd = \"wc -l %s\"", "line if (ref != tAllele1 and ref != tAllele2) or (strand != '+'", "'.' vaf = '.' 
else: sys.exit(\"ERROR: Problem processing INS %s\" % ('\\t'.join(line))) sampleField", "!= \"INS\" and variantClass != \"TNP\" and variantClass !=\"ONP\": toContinue = SamtoolsFaidx(Options.refGenome, genomicPos,", "+ \";MAF_Genome_Change=\" + line[ 14] + \";MAF_Variant_Type=\" + variantType + \";MAF_Variant_Classification=\" + mutType", "n = UpdateProgressGetN(Options.maf) if Options.spotcheck: with open(Options.maf, 'r') as inFile: SpotCheckProperReference(inFile, Options, n)", "very strange mutations within the MAF file. elif ref_reads == 'NA' or alt_reads", "(ref_reads != 'NA' or iref_reads!='NA') and (alt_reads != 'NA' or ialt_reads!='NA'): GT=\"0/1\" ref_reads", "genome to be used for maf2vcf conversion.\") parser.add_option('-s', '--spotCheckMaf', dest='spotcheck', default=False, action='store_true', help=\"Use", "+ \"-\" + line[3], check=False) # VCF reference is the preceding base in", "Chromosome Start_position') == False: print(\"\") print(\"ERROR: No header found in maf file.\") elif", "MAF file. elif ref_reads == 'NA' or alt_reads == 'NA' and reportedVAF ==", "= 0 for line in inFile: if i == 1: toPullIDs = line.rstrip('\\n').split('\\t')", "'.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut) def processINS(line, chrom, pos, rsid, mutType, variantType,", "Function of interest. :return: A function to wrap around a function. 
''' @wraps(function)", "OptionParser import subprocess from functools import wraps import datetime import time import numpy", "line[8] # Normal Allele tAllele2 = line[9] # Alt Allele QUAL = line[42]", "sys.exit(\"ERROR: Problem processing INS %s\" % ('\\t'.join(line))) sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads,", "{print $0 | \\\"LC_ALL=C sort -k1,1 -k2,2n\\\"}' | gzip > %s\"%(vcfFile, vcfFileSorted)) os.system(\"rm", "= line[39] alt_reads = line[38] reportedVAF = line[28] i_t_vaf = line[43] # Get", "INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return (lineOut) def CreateVCFLine(line, errorFile, Options): line = line.rstrip('\\n').split('\\t')", "file to ensure proper reference file is used. Random sampling is employed to", "return(linetowrite) def CreateHeader(ioObject, Options, tumorID, normalID): now = datetime.datetime.now() ioObject.write(\"##fileformat=VCFv4.2\\n\") ioObject.write(\"##fileDate=%s\\n\"%(now.date())) ioObject.write(\"##source=maf2vcf.py\\n\") ioObject.write(\"##reference=%s\\n\"%(Options.refGenome))", "+ 1) / n sys.stdout.write(\"[%-20s] %d%%\\t INFO: %s\" % ('=' * int(20 *", "''' Use this as a wrapper at the top of any function you", "\"NA\": normalGenotype = \".:.,.:.:.\" else: normalGenotype = \".:.,.:.:%s\" % (line[41]) lineOut = [chrom,", "variantClass != \"INS\" and variantClass != \"TNP\" and variantClass !=\"ONP\": toContinue = SamtoolsFaidx(Options.refGenome,", "= 'usage: %prog -i <*.maf> -o <directory> -r <ref.fa>' parser = OptionParser(usage) parser.add_option('-i',", "'.' alt_reads = '.' total_reads = '.' elif ( i_t_vaf == \"\" or", "ALT(s) in the order listed\\\">\\n\") ioObject.write(\"##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\\\"Total read depth across this site\\\">\\n\") ioObject.write(\"##FORMAT=<ID=VF,Number=1,Type=Float,Description=\\\"Variant Allele", "alt_reads = '.' total_reads = '.' vaf = '.' 
else: sys.exit(\"ERROR: Problem processing", "of what I'm choosing to filter out as possible... if Options.verbose: print(\"WARNING: Malformed", "%s\" % '\\t'.join(line)) return(None) # Create INFO field INFO = \"MAF_Hugo_Symbol=\" + line[0]", "line out lineOut = [chrom, pos, rsid, refAllele, altAllele, QUAL, '.', INFO, \"GT:AD:DP:VF\",", "GT='./.' ref_reads='.' alt_reads='.' total_reads='.' vaf='.' else: sys.exit(\"ERROR: Problem processing DEL %s\"%('\\t'.join(line))) sampleField =", "vcfPos=refAnchorPos # Get read information iref_reads = line[37] ialt_reads = line[36] ref_reads =", "gzip > %s\"%(vcfFile, vcfFileSorted)) os.system(\"rm %s\"%(vcfFile)) os.system(\"gzip %s\"%(errorFile)) def main(): print(\"INFO: Processing MAF", "vcfRef = refSeq # VCF has base directly preceding the deletion as the", "precedes the deletion. refSeq = SamtoolsFaidx(Options.refGenome, chrom + \":\" + refAnchorPos + \"-\"", "variantType = line[6] # Create proper vcf formatted information if mutType == '':", "pos, rsid, mutType, variantType, strand, errorFile, Options) elif variantType==\"TNP\" or variantType==\"ONP\": with open(errorFile,", "\\ line[44] # Normal variant field if anything if line[41] == \"NA\": normalGenotype", "in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=DCC_Project_Code,Number=1,Type=String,Description=\\\"DCC Project Code in original MAF file.\\\">\\n\") ioObject.write(\"##FORMAT=<ID=GT,Number=1,Type=String,Description=\\\"Genotype\\\">\\n\") ioObject.write(\"##FORMAT=<ID=AD,Number=2,Type=Integer,Description=\\\"Allelic", "'--verbose', dest='verbose', default=False, action='store_true', help=\"Use this flag to turn on verbose mode. 
Default=False\")", "read depth across this site\\\">\\n\") ioObject.write(\"##FORMAT=<ID=VF,Number=1,Type=Float,Description=\\\"Variant Allele Frequency.\\\">\\n\") ioObject.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\t%s\\t%s\\n\"%(normalID,tumorID)) @fn_timer def ProcessFile(Options): n", "read in [ref_reads, iref_reads] if read != \"NA\"][0] alt_reads = [read for read", "reportedVAF # Sets VAF equal to 1 if ref_reads == 'NA': ref_reads =", "Appears to be homozygous for alternative allele (germline unlikely since it is called", "sequence and perform check if needed. :param check: Whether or not to throw", "# Create INFO field INFO = \"MAF_Hugo_Symbol=\" + line[0] + \";MAF_ref_context=\" + line[15].upper()", "proc.stdout.readlines() refSeq = ''.join([line.decode('utf-8').rstrip('\\n') for line in outInfo[1:]]) if check: if refSeq ==", "% ('\\t'.join(line))) sys.exit() # VCF reference is the preceding base plus the reported", "read != \"NA\"][0] alt_reads = [read for read in [alt_reads, ialt_reads] if read", "open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') # print(line) # sys.exit(\"ERROR: Malformed MAF entry.\") return(linetowrite)", "not None: outVCF.write('\\t'.join(linetoWrite)+'\\n') print('') print(\"INFO: Sorting vcf file.\") vcfFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf')", "fn_timer(function): ''' Use this as a wrapper at the top of any function", "used for maf2vcf conversion.\") parser.add_option('-s', '--spotCheckMaf', dest='spotcheck', default=False, action='store_true', help=\"Use this flag to", "proc = subprocess.Popen(['samtools','faidx',refGenome, genomicPos], stdout=subprocess.PIPE) proc.wait() outInfo = proc.stdout.readlines() refSeq = ''.join([line.decode('utf-8').rstrip('\\n') for", "# VCF reference is the preceding base plus the reported deletion in the", "minutes\" % (function.__name__, str(round((t1-t0)/60.,2))) ) return result return 
function_timer def UpdateProgressGetN(fileName): if fileName[len(fileName)-1]==\"z\":", "subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout return(int(pipe.read().decode(\"utf-8\").lstrip(\" \").split(\" \")[0])) def UpdateProgress(i, n, DisplayText): ''' Prints a", "= SamtoolsFaidx(Options.refGenome, genomicPos, ref) if count == len(a): print('') return(toContinue) # else: #", "genome shows %s' % (ref, genomicPos, refSeq)) sys.exit() return(None) else: return(refSeq) def SpotCheckProperReference(mafFile,", "'NA' and ialt_reads=='NA': GT='./.' ref_reads='.' alt_reads='.' total_reads='.' vaf='.' else: sys.exit(\"ERROR: Problem processing DEL", "%s' % (ref, genomicPos, refSeq)) sys.exit() return(None) else: return(refSeq) def SpotCheckProperReference(mafFile, Options, fileLength):", "across this site\\\">\\n\") ioObject.write(\"##FORMAT=<ID=VF,Number=1,Type=Float,Description=\\\"Variant Allele Frequency.\\\">\\n\") ioObject.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\t%s\\t%s\\n\"%(normalID,tumorID)) @fn_timer def ProcessFile(Options): n = UpdateProgressGetN(Options.maf)", "the file being read :return: None ''' print(\"INFO: Verifying maf file.\") if fileLength", "verbose mode. Default=False\") (options, args) = parser.parse_args() if options.maf is None or options.outDir", "Create proper vcf formatted information if mutType == '': mutType = '.' 
if", "VCF reference is the preceding base plus the reported deletion in the MAF", "or iref_reads != 'NA') and (alt_reads != 'NA' or ialt_reads != 'NA'): GT", "def UpdateProgressGetN(fileName): if fileName[len(fileName)-1]==\"z\": cmd = \"gzip -cd %s | wc -l\" %", "else: total_reads = str(int(ref_reads) + int(alt_reads)) vaf = repr(round(int(alt_reads) / float(total_reads), 4)) if", "Alt Allele QUAL = line[42] if QUAL == 'None' or QUAL == 'NA'", "lineOut = [chrom, pos, rsid, refAllele, altAllele, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField]", ":param mafFile: Input mafFile object (opened) :param Options: Parser Options :param fileLength: Length", "alt_reads]), total_reads, vaf]) # Create INFO field INFO = \"MAF_Hugo_Symbol=\" + line[0] +", "\"./.\" ref_reads = '.' alt_reads = '.' total_reads = '.' elif ( i_t_vaf", "+ \":\" + refAnchorPos + \"-\" + line[3], check=False) if refSeq[1:] != altAllele:", "Options) elif variantType==\"DEL\": linetowrite = processDEL(line, chrom, pos, rsid, mutType, variantType, strand, errorFile,", "Processing MAF file.\") FilePath = os.path.dirname(os.path.abspath(__file__)) (Options, Parser) = OptionParsing() ProcessFile(Options) if __name__==\"__main__\":", "= CreateVCFLine(line, errorFile, Options) if linetoWrite is not None: outVCF.write('\\t'.join(linetoWrite)+'\\n') print('') print(\"INFO: Sorting", "and iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA': GT='./.' ref_reads='.' alt_reads='.' total_reads='.' vaf='.'", "/ n sys.stdout.write(\"[%-20s] %d%%\\t INFO: %s\" % ('=' * int(20 * j), 100", "'.' else: sys.exit(\"ERROR: Problem processing INS %s\" % ('\\t'.join(line))) sampleField = ':'.join([GT, ','.join([ref_reads,", "pos, rsid, refAllele, altAllele, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut) def processDEL(line,", "vaf != '1.' 
and strand==\"+\" or strand==\"-\": GT=\"0|1\" else: GT=\"0/1\" sampleField = ':'.join([GT,", "(options, args) = parser.parse_args() if options.maf is None or options.outDir is None or", "determine reads for vaf==1 if ref_reads == 'NA' or alt_reads == 'NA' and", "matches :param refGenome: Reference Fasta file :param genomicPos: Genomic Position of interest. :param", "vaf = reportedVAF # Sets VAF equal to 1 if ref_reads == 'NA':", ":return: A function to wrap around a function. ''' @wraps(function) def function_timer(*args, **kwargs):", "vaf==1 if (ref_reads != 'NA' or iref_reads!='NA') and (alt_reads != 'NA' or ialt_reads!='NA'):", "elif variantType==\"TNP\" or variantType==\"ONP\": with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') else: # This", "# sys.exit(\"Problem here\") elif i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False:", "pos vcfAlt = refSeq[0]+altAllele vcfPos = refAnchorPos # Get read information iref_reads =", "== 'NA' and ialt_reads=='NA': vaf=i_t_vaf GT=\"./.\" ref_reads = '.' alt_reads = '.' total_reads", "Total number of steps. :param DisplayText: A string that you want to print", "and variantClass !=\"ONP\": toContinue = SamtoolsFaidx(Options.refGenome, genomicPos, ref) if count == len(a): print('')", "return(lineOut) def processINS(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options): ref =", "iref_reads != 'NA') and (alt_reads != 'NA' or ialt_reads != 'NA'): GT =", "= ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Create INFO field INFO = \"MAF_Hugo_Symbol=\"", "MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_ref_context,Number=1,Type=String,Description=\\\"Reference context in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Genome_Change,Number=1,Type=String,Description=\\\"Genome change in original MAF", "2% of a file of more than 200 variants. :param mafFile: Input mafFile", "float(total_reads), 4)) if vaf != '1.' 
and strand==\"+\" or strand==\"-\": GT=\"0|1\" else: GT=\"0/1\"", "reportedVAF = line[28] i_t_vaf = line[43] # Get phasing information and determine reads", "DEL refAnchorPos = str(int(pos) - 1) # Fetch the base that precedes the", "= line[1] + \":\" + line[2] + \"-\" + line[3] ref = line[7]", "the deletion as the alternative base and the variant pos vcfAlt=refSeq[0] vcfPos=refAnchorPos #", "vcf4.2 file using python >=3.6. Created by <NAME> 8 March 2018 ''' import", "for line in mafFile: if i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position') ==", "running %s: %s minutes\" % (function.__name__, str(round((t1-t0)/60.,2))) ) return result return function_timer def", "alt_reads = line[38] reportedVAF = line[28] i_t_vaf = line[43] # Get phasing information", "or strand==\"-\": GT=\"0|1\" else: GT=\"0/1\" sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) #", "Reference genome shows %s' % (ref, genomicPos, refSeq)) sys.exit() return(None) else: return(refSeq) def", "Tossing these very strange mutations within the MAF file. elif ref_reads == 'NA'", "else: alt_reads = '.' total_reads = ref_reads sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads,", "preceding the deletion as the alternative base and the variant pos vcfAlt =", "linetoWrite is not None: outVCF.write('\\t'.join(linetoWrite)+'\\n') print('') print(\"INFO: Sorting vcf file.\") vcfFile = Options.outDir", "i+=1 tumorID = toPullIDs[12] normalID = toPullIDs[13] count = 0 i = 0", "numpy as np def OptionParsing(): usage = 'usage: %prog -i <*.maf> -o <directory>", "= \"1/1\" # Appears to be homozygous for alternative allele (germline unlikely since", "= proc.stdout.readlines() refSeq = ''.join([line.decode('utf-8').rstrip('\\n') for line in outInfo[1:]]) if check: if refSeq", "and the variant pos vcfAlt=refSeq[0] vcfPos=refAnchorPos # Get read information iref_reads = line[37]", "Will spot check 2% of a file of more than 200 variants. 
:param", "total_reads = '.' elif (i_t_vaf==\"\" or i_t_vaf==\"NA\") and ref_reads == 'NA' and iref_reads=='NA'", "ensure proper reference is used. Will spot check 2% of a file of", "def processINS(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options): ref = line[7]", "'NA' or QUAL == '': QUAL = '.' if ref == tAllele1: altAllele", "match reference sequence. %s\" % ('\\t'.join(line))) sys.exit() # VCF reference is the preceding", "!= '-'): with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') if Options.verbose: print(\"WARNING: %s\" %", "+ line[3] ref = line[7] mutType = line[5] variantClass = line[6] if variantClass", "explicityly want to know as much of what I'm choosing to filter out", "file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_ref_context,Number=1,Type=String,Description=\\\"Reference context in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Genome_Change,Number=1,Type=String,Description=\\\"Genome change in original MAF file.\\\">\\n\")", "base and the variant pos vcfAlt = refSeq[0]+altAllele vcfPos = refAnchorPos # Get", "to a vcf4.2 file using python >=3.6. Created by <NAME> 8 March 2018", ":param check: Whether or not to throw error if the provided reference matches", "'.' if ref == tAllele1: altAllele = tAllele1 refAllele = tAllele2 else: altAllele", "tAllele1 ref_reads = line[39] alt_reads = line[38] reportedVAF = line[28] # Get phasing", "+ variantType + \";MAF_Variant_Classification=\" + mutType +\";DCC_Project_Code=\" + line[44] # Normal variant field", "+\";DCC_Project_Code=\" + line[44] # Normal variant field if anything if line[41]==\"NA\": normalGenotype =", "Options) elif variantType==\"INS\": linetowrite = processINS(line, chrom, pos, rsid, mutType, variantType, strand, errorFile,", "ref_reads = '.' alt_reads = '.' total_reads = '.' vaf = '.' 
else:", "original MAF file.\\\">\\n\") ioObject.write(\"##FORMAT=<ID=GT,Number=1,Type=String,Description=\\\"Genotype\\\">\\n\") ioObject.write(\"##FORMAT=<ID=AD,Number=2,Type=Integer,Description=\\\"Allelic depths of REF and ALT(s) in the order", "alt_reads]), total_reads, vaf]) # Last check for interesting but unresolved MAF line if", "is employed to ensure proper reference is used. Will spot check 2% of", "a = list(a[:int(fileLength*n)]) i = 0 count = 0 for line in mafFile:", "line[5] variantClass = line[6] if variantClass != \"INS\" and variantClass != \"TNP\" and", "\"Processing Maf File\") if line.startswith('Hugo_Symbol Chromosome Start_position'): count+=1 i += 1 else: i", "variant field if anything if line[41]==\"NA\": normalGenotype = \".:.,.:.:.\" else: normalGenotype = \".:.,.:.:%s\"%(line[41])", "% ('=' * int(20 * j), 100 * j, DisplayText)) sys.stdout.flush() def SamtoolsFaidx(refGenome,", "that you want to print out that is informative. :return: None ''' sys.stdout.write('\\r')", "# Determine type of variant to continue processing. linetowrite = None if variantType==\"SNP\":", "Genomic Position chrom, pos, id = line[1], line[2], line[10] # Get rs ID", "j), 100 * j, DisplayText)) sys.stdout.flush() def SamtoolsFaidx(refGenome, genomicPos, ref='', check=True): ''' Obtain", "= [chrom, vcfPos, rsid, vcfRef, vcfAlt, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut)", "errerOut.write('\\t'.join(line)+'\\n') # print(line) # sys.exit(\"ERROR: Malformed MAF entry.\") return(linetowrite) def CreateHeader(ioObject, Options, tumorID,", "mutations within the MAF file. elif ref_reads == 'NA' or alt_reads == 'NA'", "== \"\" or i_t_vaf == \"NA\") and ref_reads == 'NA' and iref_reads ==", "Deletion alternative allele does not match reference sequence. %s\" % ('\\t'.join(line))) sys.exit() #", "this flag to verify reference matching to maf file. 
Default=False\") parser.add_option('-v', '--verbose', dest='verbose',", "Simple SNV cases else: total_reads = str(int(ref_reads) + int(alt_reads)) vaf = repr(round(int(alt_reads) /", "read in [alt_reads, ialt_reads] if read != \"NA\"][0] total_reads = str(int(ref_reads) + int(alt_reads))", "ref_reads = line[39] alt_reads = line[38] reportedVAF = line[28] # Get phasing information", "Last check for interesting but unresolved MAF line if (ref != tAllele1 and", "vaf='.' else: sys.exit(\"ERROR: Problem processing DEL %s\"%('\\t'.join(line))) sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads,", "of interest. :return: A function to wrap around a function. ''' @wraps(function) def", "functools import wraps import datetime import time import numpy as np def OptionParsing():", "in inFile: UpdateProgress(i, n, \"Processing Maf File\") if line.startswith('Hugo_Symbol Chromosome Start_position'): count+=1 i", "reference sequence + 1 preceding base for the DEL refAnchorPos = str(int(pos)-1) #", "%d%%\\t INFO: %s\" % ('=' * int(20 * j), 100 * j, DisplayText))", "sampleField] return (lineOut) def CreateVCFLine(line, errorFile, Options): line = line.rstrip('\\n').split('\\t') # Genomic Position", "i+=1 print('') return(toContinue) def processSNP(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options):", "Options.verbose: print(\"ERROR: %s\"%(line)) sys.exit(\"ERROR: Problem in id column\") # Strand Information strand =", "No header found in maf file.\") elif line.startswith('Hugo_Symbol Chromosome Start_position') == True: toContinue", "is None: print(\"ERROR: Please include arguments for maf file, output directory, and reference", ":param n: Total number of steps. :param DisplayText: A string that you want", "and perform check if needed. 
:param check: Whether or not to throw error", "I explicityly want to know as much of what I'm choosing to filter", "i_t_vaf != \"\" and i_t_vaf != \"NA\" and ref_reads == 'NA' and iref_reads", "list(a[:int(fileLength*n)]) i = 0 count = 0 for line in mafFile: if i", "directory, and reference genome (single fasta file).\") sys.exit() else: pass return (options, parser)", "''' @wraps(function) def function_timer(*args, **kwargs): t0 = time.time() result = function(*args, **kwargs) t1", "mutType +\";DCC_Project_Code=\" + line[44] # Normal variant field if anything if line[41]==\"NA\": normalGenotype", "cmd = \"wc -l %s\" % (fileName) pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout return(int(pipe.read().decode(\"utf-8\").lstrip(\"", "unresolved MAF line if (ref != tAllele1 and ref != tAllele2) or (strand", "tAllele1 = line[8] # Normal Allele Typically tAllele2 = line[9] # Alt Allele", "Get rs ID rsid = line[10] if rsid == '': rsid = '.'", ") return result return function_timer def UpdateProgressGetN(fileName): if fileName[len(fileName)-1]==\"z\": cmd = \"gzip -cd", "of REF and ALT(s) in the order listed\\\">\\n\") ioObject.write(\"##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\\\"Total read depth across this", "= line[28] i_t_vaf = line[43] # Get phasing information and determine reads for", "# VCF reference is the preceding base in the insertion in MAF vcfRef", "Normal Allele tAllele2 = line[9] # Alt Allele QUAL = line[42] if QUAL", "or (strand != '+' and strand != '-'): with open(errorFile, 'a') as errerOut:", "if Options.spotcheck: with open(Options.maf, 'r') as inFile: SpotCheckProperReference(inFile, Options, n) with open(Options.maf,'r') as", "normalID) for line in inFile: UpdateProgress(i, n, \"Processing Maf File\") if line.startswith('Hugo_Symbol Chromosome", "j = (i + 1) / n sys.stdout.write(\"[%-20s] %d%%\\t INFO: %s\" % ('='", "'NA' and iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA': GT='./.' ref_reads='.' 
alt_reads='.' total_reads='.'", "processSNP(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options) elif variantType==\"DEL\": linetowrite =", "in the order listed\\\">\\n\") ioObject.write(\"##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\\\"Total read depth across this site\\\">\\n\") ioObject.write(\"##FORMAT=<ID=VF,Number=1,Type=Float,Description=\\\"Variant Allele Frequency.\\\">\\n\")", "print(\"INFO: Processing MAF file.\") FilePath = os.path.dirname(os.path.abspath(__file__)) (Options, Parser) = OptionParsing() ProcessFile(Options) if", "MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Type,Number=1,Type=String,Description=\\\"Variant type (SNP,INS,DEL) in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Classification,Number=1,Type=String,Description=\\\"Variant Classification (if SNP)", "== '': variantType = '.' # Determine type of variant to continue processing.", "This may seem duplicitious, but I explicityly want to know as much of", "errorFile, Options): line = line.rstrip('\\n').split('\\t') # Genomic Position chrom, pos, id = line[1],", "Chromosome Start_position') == False: # checkIt = len([k for k in a if", "return(None) # Simple SNV cases else: total_reads = str(int(ref_reads) + int(alt_reads)) vaf =", "QUAL = '.' if tAllele1 == '-': altAllele = tAllele2 else: altAllele =", "Options) if linetoWrite is not None: outVCF.write('\\t'.join(linetoWrite)+'\\n') print('') print(\"INFO: Sorting vcf file.\") vcfFile", "and strand != '-'): with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') if Options.verbose: print(\"WARNING:", "Sets VAF equal to 1 if ref_reads == 'NA': ref_reads = '.' 
total_reads", "\"MAF_Hugo_Symbol=\" + line[0] + \";MAF_ref_context=\" + line[15].upper() + \";MAF_Genome_Change=\" + line[14] + \";MAF_Variant_Type=\"", "= refAnchorPos # Get read information iref_reads = line[37] ialt_reads = line[36] ref_reads", "tumorID = toPullIDs[12] normalID = toPullIDs[13] count = 0 i = 0 with", ":param DisplayText: A string that you want to print out that is informative.", "or ialt_reads!='NA'): GT=\"0/1\" ref_reads = [read for read in [ref_reads, iref_reads] if read", "i_t_vaf != \"NA\" and ref_reads == 'NA' and iref_reads == 'NA' and alt_reads", "chrom, pos, rsid, mutType, variantType, strand, errorFile, Options) elif variantType==\"INS\": linetowrite = processINS(line,", "!= '+' and strand != '-'): with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') if", "else: altAllele = tAllele2 refAllele = tAllele1 ref_reads = line[39] alt_reads = line[38]", "MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=DCC_Project_Code,Number=1,Type=String,Description=\\\"DCC Project Code in original MAF file.\\\">\\n\") ioObject.write(\"##FORMAT=<ID=GT,Number=1,Type=String,Description=\\\"Genotype\\\">\\n\") ioObject.write(\"##FORMAT=<ID=AD,Number=2,Type=Integer,Description=\\\"Allelic depths of", "'w') as errorOut: errorOut.write(header) CreateHeader(outVCF, Options, tumorID, normalID) for line in inFile: UpdateProgress(i,", "== ref: return(True) else: print('ERROR: May not be proper reference genome') print('ERROR: Improper", "+ \";MAF_Variant_Type=\" + variantType + \";MAF_Variant_Classification=\" + mutType + \";DCC_Project_Code=\" + \\ line[44]", "ref_reads == 'NA' or alt_reads == 'NA' and reportedVAF == '1': GT =", "\")[0])) def UpdateProgress(i, n, DisplayText): ''' Prints a progress bar where appropriate. 
:param", "object (opened) :param Options: Parser Options :param fileLength: Length of the file being", "from optparse import OptionParser import subprocess from functools import wraps import datetime import", "\"INS\" and variantClass != \"TNP\" and variantClass !=\"ONP\": toContinue = SamtoolsFaidx(Options.refGenome, genomicPos, ref)", "REF and ALT(s) in the order listed\\\">\\n\") ioObject.write(\"##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\\\"Total read depth across this site\\\">\\n\")", "INFO field INFO = \"MAF_Hugo_Symbol=\" + line[0] + \";MAF_ref_context=\" + line[15].upper() + \";MAF_Genome_Change=\"", "Options.verbose: print(\"WARNING: Malformed MAF entry. %s\"%('\\t'.join(line))) print('') with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n')", "%s at %s. Reference genome shows %s' % (ref, genomicPos, refSeq)) sys.exit() return(None)", "file of more than 200 variants. :param mafFile: Input mafFile object (opened) :param", "Default=False\") parser.add_option('-v', '--verbose', dest='verbose', default=False, action='store_true', help=\"Use this flag to turn on verbose", "= 0 for line in mafFile: if i != 0 and line.startswith('Hugo_Symbol Chromosome", "deletion as the alternative base and the variant pos vcfAlt=refSeq[0] vcfPos=refAnchorPos # Get", "Whether or not to throw error if the provided reference matches :param refGenome:", "iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA': GT='./.' ref_reads='.' alt_reads='.' total_reads='.' vaf='.' else:", "DisplayText): ''' Prints a progress bar where appropriate. :param i: Current Step :param", "of steps. :param DisplayText: A string that you want to print out that", "Options): line = line.rstrip('\\n').split('\\t') # Genomic Position chrom, pos, id = line[1], line[2],", "print(\"WARNING: %s\" % '\\t'.join(line)) return(None) # Simple SNV cases else: total_reads = str(int(ref_reads)", "QUAL == '': QUAL = '.' 
if tAllele1 == '-': altAllele = tAllele2", "line = line.rstrip('\\n').split('\\t') # Genomic Position chrom, pos, id = line[1], line[2], line[10]", "being read :return: None ''' print(\"INFO: Verifying maf file.\") if fileLength > 200:", "Genomic Position of interest. :param ref: Reference sequence to compare to fetched sequence.", "altAllele, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut) def processDEL(line, chrom, pos, rsid,", "vcfRef = refSeq[0] # VCF has base directly preceding the deletion as the", "processINS(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options) elif variantType==\"TNP\" or variantType==\"ONP\":", "line.rstrip('\\n').split('\\t') break else: header = line i+=1 tumorID = toPullIDs[12] normalID = toPullIDs[13]", "wc -l\" % (fileName) pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout else: cmd = \"wc", "def UpdateProgress(i, n, DisplayText): ''' Prints a progress bar where appropriate. :param i:", "parser = OptionParser(usage) parser.add_option('-i', '--input_maf', dest=\"maf\", default=None, help=\".maf file to be converted.\") parser.add_option('-o',", "('\\t'.join(line))) sys.exit() # VCF reference is the preceding base plus the reported deletion", "line[44] # Normal variant field if anything if line[41]==\"NA\": normalGenotype = \".:.,.:.:.\" else:", "is used. Will spot check 2% of a file of more than 200", "rsid = '.' 
elif rsid.startswith(\"rs\") == False: if Options.verbose: print(\"ERROR: %s\"%(line)) sys.exit(\"ERROR: Problem", "sys.exit(\"What the fuck\") i+=1 print('') return(toContinue) def processSNP(line, chrom, pos, rsid, mutType, variantType,", "line[28] i_t_vaf = line[43] # Get phasing information and determine reads for vaf==1", "refSeq[0]+altAllele vcfPos = refAnchorPos # Get read information iref_reads = line[37] ialt_reads =", "-i <*.maf> -o <directory> -r <ref.fa>' parser = OptionParser(usage) parser.add_option('-i', '--input_maf', dest=\"maf\", default=None,", "subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout else: cmd = \"wc -l %s\" % (fileName) pipe =", "vaf]) # Tossing these very strange mutations within the MAF file. elif ref_reads", "= '.' if tAllele1 == '-': altAllele = tAllele2 else: altAllele = tAllele1", "mutType + \";DCC_Project_Code=\" + \\ line[44] # Normal variant field if anything if", "if rsid == '': rsid = '.' elif rsid.startswith(\"rs\") == False: if Options.verbose:", "strand != '-'): with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') if Options.verbose: print(\"WARNING: %s\"", "open(Options.maf, 'r') as inFile: with open(Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf'), 'w') as outVCF: errorFile =", "VAF equal to 1 if ref_reads == 'NA': ref_reads = '.' 
total_reads =", "and ref_reads == 'NA' and iref_reads == 'NA' and alt_reads == 'NA' and", "Options.verbose: print(\"WARNING: %s\" % '\\t'.join(line)) return(None) # Simple SNV cases else: total_reads =", "= ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Tossing these very strange mutations within", "to throw error if the provided reference matches :param refGenome: Reference Fasta file", "linetowrite = processINS(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options) elif variantType==\"TNP\"", "'NA' or alt_reads == 'NA' and reportedVAF == 'NA': with open(errorFile, 'a') as", "# Fetch the base that precedes the deletion. refSeq = SamtoolsFaidx(Options.refGenome, chrom +", "of interest. :param ref: Reference sequence to compare to fetched sequence. :return: Fetched", "alt_reads == 'NA' and ialt_reads == 'NA': vaf = i_t_vaf GT = \"./.\"", "'--input_maf', dest=\"maf\", default=None, help=\".maf file to be converted.\") parser.add_option('-o', '--output_dir', dest=\"outDir\", default=None, help=\"Output", "\"NA\" and ref_reads == 'NA' and iref_reads == 'NA' and alt_reads == 'NA'", "count == len(a): print('') return(toContinue) # else: # print(checkIt) # print(line) # print([k", "sampling is employed to ensure proper reference is used. Will spot check 2%", "':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Tossing these very strange mutations within the", "'': rsid = '.' elif rsid.startswith(\"rs\") == False: if Options.verbose: print(\"ERROR: %s\"%(line)) sys.exit(\"ERROR:", "alternative base and the variant pos vcfAlt = refSeq[0]+altAllele vcfPos = refAnchorPos #", "ref_reads == 'NA' and iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA': vaf=i_t_vaf GT=\"./.\"", "\".:.,.:.:.\" else: normalGenotype = \".:.,.:.:%s\" % (line[41]) lineOut = [chrom, vcfPos, rsid, vcfRef,", "since it is called w.r.t normal?) 
vaf = reportedVAF # Sets VAF equal", "else: GT=\"0/1\" sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Last check for", "'NA' and ialt_reads == 'NA': GT = './.' ref_reads = '.' alt_reads =", "toContinue = SamtoolsFaidx(Options.refGenome, genomicPos, ref) if count == len(a): print('') return(toContinue) # else:", "sys from optparse import OptionParser import subprocess from functools import wraps import datetime", "DisplayText: A string that you want to print out that is informative. :return:", "vcfAlt=refSeq[0] vcfPos=refAnchorPos # Get read information iref_reads = line[37] ialt_reads = line[36] ref_reads", "vaf]) # Create INFO field INFO = \"MAF_Hugo_Symbol=\" + line[0] + \";MAF_ref_context=\" +", "in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Genome_Change,Number=1,Type=String,Description=\\\"Genome change in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Type,Number=1,Type=String,Description=\\\"Variant type (SNP,INS,DEL)", "main(): print(\"INFO: Processing MAF file.\") FilePath = os.path.dirname(os.path.abspath(__file__)) (Options, Parser) = OptionParsing() ProcessFile(Options)", "'': variantType = '.' # Determine type of variant to continue processing. linetowrite", "'NA': with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') if Options.verbose: print(\"WARNING: %s\" % '\\t'.join(line))", "if i == 1: toPullIDs = line.rstrip('\\n').split('\\t') break else: header = line i+=1", "fileLength): ''' Randomly samples the file to ensure proper reference file is used.", "file. 
elif ref_reads == 'NA' or alt_reads == 'NA' and reportedVAF == 'NA':", "# if checkIt==1: UpdateProgress(count, len(a), \"INFO: Verifying maf file\") count+=1 line = line.rstrip('\\n').split('\\t')", "= str(int(ref_reads) + int(alt_reads)) vaf = str(int(alt_reads)/float(total_reads)) elif i_t_vaf!=\"\" and i_t_vaf!=\"NA\" and ref_reads", "+ Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf'), 'w') as outVCF: errorFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/')) - 1].replace('.maf', '.ignoredSNVs.maf')", "w.r.t normal?) vaf = reportedVAF # Sets VAF equal to 1 if ref_reads", "+ mutType +\";DCC_Project_Code=\" + line[44] # Normal variant field if anything if line[41]==\"NA\":", "to continue processing. linetowrite = None if variantType==\"SNP\": linetowrite = processSNP(line, chrom, pos,", "file is used. Random sampling is employed to ensure proper reference is used.", "+ 1 preceding base for the DEL refAnchorPos = str(int(pos) - 1) #", "= repr(round(int(alt_reads) / float(total_reads), 4)) if vaf != '1.' and strand==\"+\" or strand==\"-\":", "etc.) mutType = line[5] variantType = line[6] # Create proper vcf formatted information", "count = 0 i = 0 with open(Options.maf, 'r') as inFile: with open(Options.outDir", "iref_reads == 'NA' and alt_reads == 'NA' and ialt_reads == 'NA': GT =", "%prog -i <*.maf> -o <directory> -r <ref.fa>' parser = OptionParser(usage) parser.add_option('-i', '--input_maf', dest=\"maf\",", "and ialt_reads == 'NA': vaf = i_t_vaf GT = \"./.\" ref_reads = '.'", "line[2], line[10] # Get rs ID rsid = line[10] if rsid == '':", "refSeq = SamtoolsFaidx(Options.refGenome, chrom + \":\" + refAnchorPos + \"-\" + line[3], check=False)", "flag to turn on verbose mode. 
Default=False\") (options, args) = parser.parse_args() if options.maf", "i = 0 for line in inFile: if i == 1: toPullIDs =", "!= \"\" and i_t_vaf != \"NA\" and ref_reads == 'NA' and iref_reads ==", "n sys.stdout.write(\"[%-20s] %d%%\\t INFO: %s\" % ('=' * int(20 * j), 100 *", "mutType, variantType, strand, errorFile, Options) elif variantType==\"INS\": linetowrite = processINS(line, chrom, pos, rsid,", "vcfAlt, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return (lineOut) def CreateVCFLine(line, errorFile, Options):", "= processSNP(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options) elif variantType==\"DEL\": linetowrite", "mafFile: if i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False: # checkIt", "count = 0 for line in mafFile: if i != 0 and line.startswith('Hugo_Symbol", "line[38] reportedVAF = line[28] i_t_vaf = line[43] # Get phasing information and determine", "'--ref_genome', dest=\"refGenome\", default=\"/Users/schencro/Desktop/Bioinformatics_Tools/Ref_Genomes/Ensembl/GRCh37.75/GRCh37.75.fa\", help=\"Reference genome to be used for maf2vcf conversion.\") parser.add_option('-s', '--spotCheckMaf',", "def SamtoolsFaidx(refGenome, genomicPos, ref='', check=True): ''' Obtain reference sequence and perform check if", "is called w.r.t normal?) 
vaf = reportedVAF # Sets VAF equal to 1", "genomicPos, refSeq)) sys.exit() return(None) else: return(refSeq) def SpotCheckProperReference(mafFile, Options, fileLength): ''' Randomly samples", "errorFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/')) - 1].replace('.maf', '.ignoredSNVs.maf') with open(errorFile, 'w') as errorOut:", "alt_reads = line[38] reportedVAF = line[28] # Get phasing information and determine reads", "but unresolved MAF line if (ref != tAllele1 and ref != tAllele2) or", "line[6] # Create proper vcf formatted information if mutType == '': mutType =", "proc.wait() outInfo = proc.stdout.readlines() refSeq = ''.join([line.decode('utf-8').rstrip('\\n') for line in outInfo[1:]]) if check:", "+ \";MAF_Variant_Classification=\" + mutType +\";DCC_Project_Code=\" + line[44] # Normal variant field if anything", "200: n=0.02 else: n=1. a = np.arange(fileLength) np.random.shuffle(a) a = list(a[:int(fileLength*n)]) i =", "OptionParser(usage) parser.add_option('-i', '--input_maf', dest=\"maf\", default=None, help=\".maf file to be converted.\") parser.add_option('-o', '--output_dir', dest=\"outDir\",", "[ref_reads, iref_reads] if read != \"NA\"][0] alt_reads = [read for read in [alt_reads,", "as the alternative base and the variant pos vcfAlt = refSeq[0]+altAllele vcfPos =", "mode. Default=False\") (options, args) = parser.parse_args() if options.maf is None or options.outDir is", "= line[38] reportedVAF = line[28] # Get phasing information and determine reads for", "== '1': GT = \"1/1\" # Appears to be homozygous for alternative allele", "number of steps. 
:param DisplayText: A string that you want to print out", "\".:.,.:.:.\" else: normalGenotype = \".:.,.:.:%s\"%(line[41]) # Final vcf line out lineOut = [chrom,", "variant pos vcfAlt = refSeq[0]+altAllele vcfPos = refAnchorPos # Get read information iref_reads", "tumorID, normalID): now = datetime.datetime.now() ioObject.write(\"##fileformat=VCFv4.2\\n\") ioObject.write(\"##fileDate=%s\\n\"%(now.date())) ioObject.write(\"##source=maf2vcf.py\\n\") ioObject.write(\"##reference=%s\\n\"%(Options.refGenome)) ioObject.write(\"##sampleColumns=Normal.Tumor\\n\") ioObject.write(\"##INFO=<ID=MAF_Hugo_Symbol,Number=1,Type=String,Description=\\\"HUGO Symbol in", "change in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Type,Number=1,Type=String,Description=\\\"Variant type (SNP,INS,DEL) in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Classification,Number=1,Type=String,Description=\\\"Variant", "int(20 * j), 100 * j, DisplayText)) sys.stdout.flush() def SamtoolsFaidx(refGenome, genomicPos, ref='', check=True):", "i = 0 with open(Options.maf, 'r') as inFile: with open(Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf'), 'w')", "the preceding base in the insertion in MAF vcfRef = refSeq[0] # VCF", "if QUAL == 'None' or QUAL == 'NA' or QUAL == '': QUAL", "variantType==\"SNP\": linetowrite = processSNP(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options) elif", "Start_position') == True: toContinue = True else: sys.exit(\"What the fuck\") i+=1 print('') return(toContinue)", "tAllele2 else: altAllele = tAllele1 # Obtain the reference sequence + 1 preceding", "'.' 
elif ( i_t_vaf == \"\" or i_t_vaf == \"NA\") and ref_reads ==", "print (\"INFO: Total time running %s: %s minutes\" % (function.__name__, str(round((t1-t0)/60.,2))) ) return", "# Alt Allele QUAL = line[42] if QUAL == 'None' or QUAL ==", "pos, rsid, mutType, variantType, strand, errorFile, Options): ref = line[7] tAllele1 = line[8]", "!= tAllele2) or (strand != '+' and strand != '-'): with open(errorFile, 'a')", "line[42] if QUAL == 'None' or QUAL == 'NA' or QUAL == '':", "wrapper at the top of any function you want to get run time", "stdout=subprocess.PIPE) proc.wait() outInfo = proc.stdout.readlines() refSeq = ''.join([line.decode('utf-8').rstrip('\\n') for line in outInfo[1:]]) if", "'1.' and strand==\"+\" or strand==\"-\": GT=\"0|1\" else: GT=\"0/1\" sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]),", "''' Script to convert a MAF to a vcf4.2 file using python >=3.6.", "at the top of any function you want to get run time information", "file.\") elif line.startswith('Hugo_Symbol Chromosome Start_position') == True: toContinue = True else: sys.exit(\"What the", "original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Type,Number=1,Type=String,Description=\\\"Variant type (SNP,INS,DEL) in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Classification,Number=1,Type=String,Description=\\\"Variant Classification (if", "# Alt Allele Typically QUAL = line[42] if QUAL == 'None' or QUAL", "and ialt_reads == 'NA': GT = './.' ref_reads = '.' alt_reads = '.'", "Obtain the reference sequence + 1 preceding base for the DEL refAnchorPos =", "\"NA\"][0] alt_reads = [read for read in [alt_reads, ialt_reads] if read != \"NA\"][0]", "Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.head.maf','.sorted.vcf.gz') os.system(\"cat %s | awk '$1 ~ /^#/ {print $0;next} {print", "alt_reads = '.' total_reads = '.' 
elif (i_t_vaf==\"\" or i_t_vaf==\"NA\") and ref_reads ==", "os.system(\"rm %s\"%(vcfFile)) os.system(\"gzip %s\"%(errorFile)) def main(): print(\"INFO: Processing MAF file.\") FilePath = os.path.dirname(os.path.abspath(__file__))", "$0;next} {print $0 | \\\"LC_ALL=C sort -k1,1 -k2,2n\\\"}' | gzip > %s\"%(vcfFile, vcfFileSorted))", "= tAllele2 refAllele = tAllele1 ref_reads = line[39] alt_reads = line[38] reportedVAF =", ">=3.6. Created by <NAME> 8 March 2018 ''' import os import sys from", ":param function: Function of interest. :return: A function to wrap around a function.", "Total time running %s: %s minutes\" % (function.__name__, str(round((t1-t0)/60.,2))) ) return result return", "ref_reads == 'NA': ref_reads = '.' total_reads = alt_reads else: alt_reads = '.'", "chrom + \":\" + refAnchorPos + \"-\" + line[3], check=False) if refSeq[1:] !=", "else: altAllele = tAllele2 refAllele = tAllele1 # Obtain the reference sequence +", "'.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return (lineOut) def CreateVCFLine(line, errorFile, Options): line =", "or i_t_vaf == \"NA\") and ref_reads == 'NA' and iref_reads == 'NA' and", "outVCF.write('\\t'.join(linetoWrite)+'\\n') print('') print(\"INFO: Sorting vcf file.\") vcfFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf') vcfFileSorted =", "DEL refAnchorPos = str(int(pos)-1) # Fetch the base that precedes the deletion. refSeq", "check 2% of a file of more than 200 variants. :param mafFile: Input", "vcfFileSorted = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.head.maf','.sorted.vcf.gz') os.system(\"cat %s | awk '$1 ~ /^#/ {print", "to compare to fetched sequence. :return: Fetched reference sequence. ''' proc = subprocess.Popen(['samtools','faidx',refGenome,", "'$1 ~ /^#/ {print $0;next} {print $0 | \\\"LC_ALL=C sort -k1,1 -k2,2n\\\"}' |", "python >=3.6. 
Created by <NAME> 8 March 2018 ''' import os import sys", "parser.add_option('-r', '--ref_genome', dest=\"refGenome\", default=\"/Users/schencro/Desktop/Bioinformatics_Tools/Ref_Genomes/Ensembl/GRCh37.75/GRCh37.75.fa\", help=\"Reference genome to be used for maf2vcf conversion.\") parser.add_option('-s',", "refAnchorPos # Get read information iref_reads = line[37] ialt_reads = line[36] ref_reads =", "to verify reference matching to maf file. Default=False\") parser.add_option('-v', '--verbose', dest='verbose', default=False, action='store_true',", "elif line.startswith('Hugo_Symbol Chromosome Start_position') == True: toContinue = True else: sys.exit(\"What the fuck\")", "for .vcf file\") parser.add_option('-r', '--ref_genome', dest=\"refGenome\", default=\"/Users/schencro/Desktop/Bioinformatics_Tools/Ref_Genomes/Ensembl/GRCh37.75/GRCh37.75.fa\", help=\"Reference genome to be used for", "ioObject.write(\"##INFO=<ID=MAF_Genome_Change,Number=1,Type=String,Description=\\\"Genome change in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Type,Number=1,Type=String,Description=\\\"Variant type (SNP,INS,DEL) in original MAF file.\\\">\\n\")", "!= \"NA\" and ref_reads == 'NA' and iref_reads == 'NA' and alt_reads ==", "file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Classification,Number=1,Type=String,Description=\\\"Variant Classification (if SNP) in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=DCC_Project_Code,Number=1,Type=String,Description=\\\"DCC Project Code in", "mafFile object (opened) :param Options: Parser Options :param fileLength: Length of the file", "ioObject.write(\"##INFO=<ID=MAF_Variant_Classification,Number=1,Type=String,Description=\\\"Variant Classification (if SNP) in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=DCC_Project_Code,Number=1,Type=String,Description=\\\"DCC Project Code in original", "more than 200 variants. 
:param mafFile: Input mafFile object (opened) :param Options: Parser", "sys.exit(\"ERROR: Problem processing DEL %s\"%('\\t'.join(line))) sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) #", "line[38] reportedVAF = line[28] # Get phasing information and determine reads for vaf==1", "this flag to turn on verbose mode. Default=False\") (options, args) = parser.parse_args() if", "include arguments for maf file, output directory, and reference genome (single fasta file).\")", "== 'NA' and alt_reads == 'NA' and ialt_reads == 'NA': vaf = i_t_vaf", "1 if ref_reads == 'NA': ref_reads = '.' total_reads = alt_reads else: alt_reads", "== tAllele1: altAllele = tAllele1 refAllele = tAllele2 else: altAllele = tAllele2 refAllele", "ioObject.write(\"##INFO=<ID=DCC_Project_Code,Number=1,Type=String,Description=\\\"DCC Project Code in original MAF file.\\\">\\n\") ioObject.write(\"##FORMAT=<ID=GT,Number=1,Type=String,Description=\\\"Genotype\\\">\\n\") ioObject.write(\"##FORMAT=<ID=AD,Number=2,Type=Integer,Description=\\\"Allelic depths of REF and", "reference genome') print('ERROR: Improper reference. Found %s at %s. Reference genome shows %s'", "usage = 'usage: %prog -i <*.maf> -o <directory> -r <ref.fa>' parser = OptionParser(usage)", "= tAllele2 refAllele = tAllele1 # Obtain the reference sequence + 1 preceding", "':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Create INFO field INFO = \"MAF_Hugo_Symbol=\" +", "UpdateProgressGetN(fileName): if fileName[len(fileName)-1]==\"z\": cmd = \"gzip -cd %s | wc -l\" % (fileName)", "True else: sys.exit(\"What the fuck\") i+=1 print('') return(toContinue) def processSNP(line, chrom, pos, rsid,", "fetched sequence. :return: Fetched reference sequence. 
''' proc = subprocess.Popen(['samtools','faidx',refGenome, genomicPos], stdout=subprocess.PIPE) proc.wait()", "line[39] alt_reads = line[38] reportedVAF = line[28] i_t_vaf = line[43] # Get phasing", "ialt_reads=='NA': vaf=i_t_vaf GT=\"./.\" ref_reads = '.' alt_reads = '.' total_reads = '.' elif", "= \".:.,.:.:.\" else: normalGenotype = \".:.,.:.:%s\"%(line[41]) # Final vcf line out lineOut =", "what I'm choosing to filter out as possible... if Options.verbose: print(\"WARNING: Malformed MAF", "of the file being read :return: None ''' print(\"INFO: Verifying maf file.\") if", "line[7] tAllele1 = line[8] # Normal Allele Typically tAllele2 = line[9] # Alt", "normalID = toPullIDs[13] count = 0 i = 0 with open(Options.maf, 'r') as", "original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Classification,Number=1,Type=String,Description=\\\"Variant Classification (if SNP) in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=DCC_Project_Code,Number=1,Type=String,Description=\\\"DCC Project", "== False: print(\"\") print(\"ERROR: No header found in maf file.\") elif line.startswith('Hugo_Symbol Chromosome", "'NA' and iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA': vaf=i_t_vaf GT=\"./.\" ref_reads =", "'--output_dir', dest=\"outDir\", default=None, help=\"Output directory for .vcf file\") parser.add_option('-r', '--ref_genome', dest=\"refGenome\", default=\"/Users/schencro/Desktop/Bioinformatics_Tools/Ref_Genomes/Ensembl/GRCh37.75/GRCh37.75.fa\", help=\"Reference", "= line[9] # Alt Allele Typically QUAL = line[42] if QUAL == 'None'", "','.join([ref_reads, alt_reads]), total_reads, vaf]) # Tossing these very strange mutations within the MAF", "INFO = \"MAF_Hugo_Symbol=\" + line[0] + \";MAF_ref_context=\" + line[15].upper() + \";MAF_Genome_Change=\" + line[", "# Variant Classification/Type (Type is SNP, INS, DEL, etc.) 
mutType = line[5] variantType", "Options.spotcheck: with open(Options.maf, 'r') as inFile: SpotCheckProperReference(inFile, Options, n) with open(Options.maf,'r') as inFile:", "Created by <NAME> 8 March 2018 ''' import os import sys from optparse", "Classification (if SNP) in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=DCC_Project_Code,Number=1,Type=String,Description=\\\"DCC Project Code in original MAF", "errorOut: errorOut.write(header) CreateHeader(outVCF, Options, tumorID, normalID) for line in inFile: UpdateProgress(i, n, \"Processing", "in maf file.\") elif line.startswith('Hugo_Symbol Chromosome Start_position') == True: toContinue = True else:", "str(int(ref_reads) + int(alt_reads)) vaf = str(int(alt_reads) / float(total_reads)) elif i_t_vaf != \"\" and", "sequence. %s\" % ('\\t'.join(line))) sys.exit() # VCF reference is the preceding base plus", "== 'NA' and reportedVAF == '1': GT = \"1/1\" # Appears to be", "% (line[41]) lineOut = [chrom, vcfPos, rsid, vcfRef, vcfAlt, QUAL, '.', INFO, \"GT:AD:DP:VF\",", "return (options, parser) def fn_timer(function): ''' Use this as a wrapper at the", "%s\" % ('\\t'.join(line))) sys.exit() # VCF reference is the preceding base plus the", "{print $0;next} {print $0 | \\\"LC_ALL=C sort -k1,1 -k2,2n\\\"}' | gzip > %s\"%(vcfFile,", "'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') if Options.verbose: print(\"WARNING: %s\" % '\\t'.join(line)) return(None) # Simple", "SamtoolsFaidx(Options.refGenome, genomicPos, ref) if count == len(a): print('') return(toContinue) # else: # print(checkIt)", "* j), 100 * j, DisplayText)) sys.stdout.flush() def SamtoolsFaidx(refGenome, genomicPos, ref='', check=True): '''", "normalGenotype = \".:.,.:.:.\" else: normalGenotype = \".:.,.:.:%s\" % (line[41]) lineOut = [chrom, vcfPos,", "k==i]) # if checkIt==1: UpdateProgress(count, len(a), \"INFO: Verifying maf file\") count+=1 line =", "line[2] + \"-\" + line[3] ref = line[7] mutType = line[5] variantClass =", "None ''' 
print(\"INFO: Verifying maf file.\") if fileLength > 200: n=0.02 else: n=1.", "or QUAL == 'NA' or QUAL == '': QUAL = '.' if tAllele1", "'NA' and alt_reads == 'NA' and ialt_reads == 'NA': vaf = i_t_vaf GT", "Final vcf line out lineOut = [chrom, pos, rsid, refAllele, altAllele, QUAL, '.',", "as inFile: SpotCheckProperReference(inFile, Options, n) with open(Options.maf,'r') as inFile: i = 0 for", "be homozygous for alternative allele (germline unlikely since it is called w.r.t normal?)", "= \".:.,.:.:.\" else: normalGenotype = \".:.,.:.:%s\" % (line[41]) lineOut = [chrom, vcfPos, rsid,", "outInfo = proc.stdout.readlines() refSeq = ''.join([line.decode('utf-8').rstrip('\\n') for line in outInfo[1:]]) if check: if", "a vcf4.2 file using python >=3.6. Created by <NAME> 8 March 2018 '''", "= (i + 1) / n sys.stdout.write(\"[%-20s] %d%%\\t INFO: %s\" % ('=' *", "maf file.\") elif line.startswith('Hugo_Symbol Chromosome Start_position') == True: toContinue = True else: sys.exit(\"What", "Create INFO field INFO = \"MAF_Hugo_Symbol=\" + line[0] + \";MAF_ref_context=\" + line[15].upper() +", "\"INFO: Verifying maf file\") count+=1 line = line.rstrip('\\n').split('\\t') genomicPos = line[1] + \":\"", "else: normalGenotype = \".:.,.:.:%s\"%(line[41]) # Final vcf line out lineOut = [chrom, pos,", "parser.add_option('-i', '--input_maf', dest=\"maf\", default=None, help=\".maf file to be converted.\") parser.add_option('-o', '--output_dir', dest=\"outDir\", default=None,", "time running %s: %s minutes\" % (function.__name__, str(round((t1-t0)/60.,2))) ) return result return function_timer", "Position of interest. :param ref: Reference sequence to compare to fetched sequence. 
:return:", "if options.maf is None or options.outDir is None or options.refGenome is None: print(\"ERROR:", "sampleField] return(lineOut) def processDEL(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options): ref", "+ line[44] # Normal variant field if anything if line[41]==\"NA\": normalGenotype = \".:.,.:.:.\"", "i_t_vaf = line[43] # Get phasing information and determine reads for vaf==1 if", "the file to ensure proper reference file is used. Random sampling is employed", "'NA': GT = './.' ref_reads = '.' alt_reads = '.' total_reads = '.'", "print(\"INFO: Sorting vcf file.\") vcfFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf') vcfFileSorted = Options.outDir +", "out lineOut = [chrom, pos, rsid, refAllele, altAllele, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype,", "the fuck\") i+=1 print('') return(toContinue) def processSNP(line, chrom, pos, rsid, mutType, variantType, strand,", "the reference sequence + 1 preceding base for the DEL refAnchorPos = str(int(pos)-1)", "= ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Last check for interesting but unresolved", "for line in outInfo[1:]]) if check: if refSeq == ref: return(True) else: print('ERROR:", "rsid, mutType, variantType, strand, errorFile, Options): ref = line[7] tAllele1 = line[8] #", "\";MAF_Variant_Type=\" + variantType + \";MAF_Variant_Classification=\" + mutType +\";DCC_Project_Code=\" + line[44] # Normal variant", "check: Whether or not to throw error if the provided reference matches :param", "[chrom, pos, rsid, refAllele, altAllele, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut) def", "-o <directory> -r <ref.fa>' parser = OptionParser(usage) parser.add_option('-i', '--input_maf', dest=\"maf\", default=None, help=\".maf file", "'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') # print(line) # sys.exit(\"ERROR: Malformed MAF entry.\") return(linetowrite) def", "# Genomic 
Position chrom, pos, id = line[1], line[2], line[10] # Get rs", "turn on verbose mode. Default=False\") (options, args) = parser.parse_args() if options.maf is None", "with open(Options.maf, 'r') as inFile: SpotCheckProperReference(inFile, Options, n) with open(Options.maf,'r') as inFile: i", "default=False, action='store_true', help=\"Use this flag to verify reference matching to maf file. Default=False\")", "'usage: %prog -i <*.maf> -o <directory> -r <ref.fa>' parser = OptionParser(usage) parser.add_option('-i', '--input_maf',", "== 'NA' or QUAL == '': QUAL = '.' if tAllele1 == '-':", "\"MAF_Hugo_Symbol=\" + line[0] + \";MAF_ref_context=\" + line[15].upper() + \";MAF_Genome_Change=\" + line[ 14] +", "refSeq)) sys.exit() return(None) else: return(refSeq) def SpotCheckProperReference(mafFile, Options, fileLength): ''' Randomly samples the", "n, DisplayText): ''' Prints a progress bar where appropriate. :param i: Current Step", "Options) elif variantType==\"TNP\" or variantType==\"ONP\": with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') else: #", "dest=\"refGenome\", default=\"/Users/schencro/Desktop/Bioinformatics_Tools/Ref_Genomes/Ensembl/GRCh37.75/GRCh37.75.fa\", help=\"Reference genome to be used for maf2vcf conversion.\") parser.add_option('-s', '--spotCheckMaf', dest='spotcheck',", "toPullIDs = line.rstrip('\\n').split('\\t') break else: header = line i+=1 tumorID = toPullIDs[12] normalID", "toPullIDs[12] normalID = toPullIDs[13] count = 0 i = 0 with open(Options.maf, 'r')", "Length of the file being read :return: None ''' print(\"INFO: Verifying maf file.\")", "QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return (lineOut) def CreateVCFLine(line, errorFile, Options): line", "os import sys from optparse import OptionParser import subprocess from functools import wraps", "ioObject.write(\"##fileformat=VCFv4.2\\n\") ioObject.write(\"##fileDate=%s\\n\"%(now.date())) ioObject.write(\"##source=maf2vcf.py\\n\") 
ioObject.write(\"##reference=%s\\n\"%(Options.refGenome)) ioObject.write(\"##sampleColumns=Normal.Tumor\\n\") ioObject.write(\"##INFO=<ID=MAF_Hugo_Symbol,Number=1,Type=String,Description=\\\"HUGO Symbol in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_ref_context,Number=1,Type=String,Description=\\\"Reference context", "field if anything if line[41] == \"NA\": normalGenotype = \".:.,.:.:.\" else: normalGenotype =", "'r') as inFile: with open(Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf'), 'w') as outVCF: errorFile = Options.outDir", "action='store_true', help=\"Use this flag to turn on verbose mode. Default=False\") (options, args) =", "not match reference sequence. %s\" % ('\\t'.join(line))) sys.exit() # VCF reference is the", "== 'NA' and iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA': vaf=i_t_vaf GT=\"./.\" ref_reads", "total_reads = str(int(ref_reads) + int(alt_reads)) vaf = str(int(alt_reads)/float(total_reads)) elif i_t_vaf!=\"\" and i_t_vaf!=\"NA\" and", "ref_reads == 'NA' or alt_reads == 'NA' and reportedVAF == 'NA': with open(errorFile,", "= line[5] variantClass = line[6] if variantClass != \"INS\" and variantClass != \"TNP\"", "total_reads, vaf]) # Tossing these very strange mutations within the MAF file. elif", "and iref_reads == 'NA' and alt_reads == 'NA' and ialt_reads == 'NA': GT", "return(None) # Create INFO field INFO = \"MAF_Hugo_Symbol=\" + line[0] + \";MAF_ref_context=\" +", "the top of any function you want to get run time information about.", "\":\" + refAnchorPos + \"-\" + line[3], check=False) if refSeq[1:] != altAllele: print(\"ERROR:", "to turn on verbose mode. 
Default=False\") (options, args) = parser.parse_args() if options.maf is", "line[43] # Get phasing information and determine reads for vaf==1 if (ref_reads !=", "(SNP,INS,DEL) in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Classification,Number=1,Type=String,Description=\\\"Variant Classification (if SNP) in original MAF file.\\\">\\n\")", "= i_t_vaf GT = \"./.\" ref_reads = '.' alt_reads = '.' total_reads =", "and alt_reads == 'NA' and ialt_reads == 'NA': vaf = i_t_vaf GT =", "to maf file. Default=False\") parser.add_option('-v', '--verbose', dest='verbose', default=False, action='store_true', help=\"Use this flag to", "parser) def fn_timer(function): ''' Use this as a wrapper at the top of", "variants. :param mafFile: Input mafFile object (opened) :param Options: Parser Options :param fileLength:", "to know as much of what I'm choosing to filter out as possible...", "perform check if needed. :param check: Whether or not to throw error if", "ProcessFile(Options): n = UpdateProgressGetN(Options.maf) if Options.spotcheck: with open(Options.maf, 'r') as inFile: SpotCheckProperReference(inFile, Options,", "check: if refSeq == ref: return(True) else: print('ERROR: May not be proper reference", "ioObject.write(\"##reference=%s\\n\"%(Options.refGenome)) ioObject.write(\"##sampleColumns=Normal.Tumor\\n\") ioObject.write(\"##INFO=<ID=MAF_Hugo_Symbol,Number=1,Type=String,Description=\\\"HUGO Symbol in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_ref_context,Number=1,Type=String,Description=\\\"Reference context in original MAF", "ialt_reads!='NA'): GT=\"0/1\" ref_reads = [read for read in [ref_reads, iref_reads] if read !=", "matching to maf file. 
Default=False\") parser.add_option('-v', '--verbose', dest='verbose', default=False, action='store_true', help=\"Use this flag", "else: sys.exit(\"ERROR: Problem processing DEL %s\"%('\\t'.join(line))) sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf])", "as np def OptionParsing(): usage = 'usage: %prog -i <*.maf> -o <directory> -r", "= tAllele1 # Obtain the reference sequence + 1 preceding base for the", "alt_reads]), total_reads, vaf]) # Tossing these very strange mutations within the MAF file.", "n) with open(Options.maf,'r') as inFile: i = 0 for line in inFile: if", "rsid, mutType, variantType, strand, errorFile, Options) elif variantType==\"TNP\" or variantType==\"ONP\": with open(errorFile, 'a')", "I'm choosing to filter out as possible... if Options.verbose: print(\"WARNING: Malformed MAF entry.", "continue processing. linetowrite = None if variantType==\"SNP\": linetowrite = processSNP(line, chrom, pos, rsid,", "0 count = 0 for line in mafFile: if i != 0 and", "(options, parser) def fn_timer(function): ''' Use this as a wrapper at the top", "vaf = repr(round(int(alt_reads) / float(total_reads), 4)) if vaf != '1.' and strand==\"+\" or", "i_t_vaf==\"NA\") and ref_reads == 'NA' and iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA':", "line.startswith('Hugo_Symbol Chromosome Start_position') == True: toContinue = True else: sys.exit(\"What the fuck\") i+=1", "for maf2vcf conversion.\") parser.add_option('-s', '--spotCheckMaf', dest='spotcheck', default=False, action='store_true', help=\"Use this flag to verify", "%s\" % '\\t'.join(line)) return(None) # Simple SNV cases else: total_reads = str(int(ref_reads) +", "'.' if tAllele1 == '-': altAllele = tAllele2 else: altAllele = tAllele1 #", "the reported deletion in the MAF file. 
vcfRef = refSeq # VCF has", "+ variantType + \";MAF_Variant_Classification=\" + mutType + \";DCC_Project_Code=\" + \\ line[44] # Normal", "= function(*args, **kwargs) t1 = time.time() print (\"INFO: Total time running %s: %s", "else: # This may seem duplicitious, but I explicityly want to know as", "== 'NA': with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') if Options.verbose: print(\"WARNING: %s\" %", "check=True): ''' Obtain reference sequence and perform check if needed. :param check: Whether", "line[3], check=False) if refSeq[1:] != altAllele: print(\"ERROR: Deletion alternative allele does not match", "== '': mutType = '.' if variantType == '': variantType = '.' #", "open(Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf'), 'w') as outVCF: errorFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/')) - 1].replace('.maf',", "with open(Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf'), 'w') as outVCF: errorFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/')) -", "'\\t'.join(line)) return(None) # Simple SNV cases else: total_reads = str(int(ref_reads) + int(alt_reads)) vaf", "int(alt_reads)) vaf = str(int(alt_reads) / float(total_reads)) elif i_t_vaf != \"\" and i_t_vaf !=", "= datetime.datetime.now() ioObject.write(\"##fileformat=VCFv4.2\\n\") ioObject.write(\"##fileDate=%s\\n\"%(now.date())) ioObject.write(\"##source=maf2vcf.py\\n\") ioObject.write(\"##reference=%s\\n\"%(Options.refGenome)) ioObject.write(\"##sampleColumns=Normal.Tumor\\n\") ioObject.write(\"##INFO=<ID=MAF_Hugo_Symbol,Number=1,Type=String,Description=\\\"HUGO Symbol in original MAF file.\\\">\\n\")", "steps. :param DisplayText: A string that you want to print out that is", "n=1. 
a = np.arange(fileLength) np.random.shuffle(a) a = list(a[:int(fileLength*n)]) i = 0 count =", "rsid, vcfRef, vcfAlt, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return (lineOut) def CreateVCFLine(line,", "total_reads='.' vaf='.' else: sys.exit(\"ERROR: Problem processing DEL %s\"%('\\t'.join(line))) sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]),", "(strand != '+' and strand != '-'): with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n')", "'.' if variantType == '': variantType = '.' # Determine type of variant", "strange mutations within the MAF file. elif ref_reads == 'NA' or alt_reads ==", "iref_reads == 'NA' and alt_reads == 'NA' and ialt_reads == 'NA': vaf =", "Prints a progress bar where appropriate. :param i: Current Step :param n: Total", "-cd %s | wc -l\" % (fileName) pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout else:", "len(a): print('') return(toContinue) # else: # print(checkIt) # print(line) # print([k for k", "if line[41]==\"NA\": normalGenotype = \".:.,.:.:.\" else: normalGenotype = \".:.,.:.:%s\"%(line[41]) # Final vcf line", "in id column\") # Strand Information strand = line[4] # Variant Classification/Type (Type", "variantClass != \"TNP\" and variantClass !=\"ONP\": toContinue = SamtoolsFaidx(Options.refGenome, genomicPos, ref) if count", "type (SNP,INS,DEL) in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Classification,Number=1,Type=String,Description=\\\"Variant Classification (if SNP) in original MAF", "(i_t_vaf==\"\" or i_t_vaf==\"NA\") and ref_reads == 'NA' and iref_reads=='NA' and alt_reads == 'NA'", "\"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut) def processINS(line, chrom, pos, rsid, mutType, variantType, strand, errorFile,", "line[9] # Alt Allele QUAL = line[42] if QUAL == 'None' or QUAL", "n: Total number of steps. 
:param DisplayText: A string that you want to", "iref_reads!='NA') and (alt_reads != 'NA' or ialt_reads!='NA'): GT=\"0/1\" ref_reads = [read for read", "to ensure proper reference file is used. Random sampling is employed to ensure", "used. Random sampling is employed to ensure proper reference is used. Will spot", "from functools import wraps import datetime import time import numpy as np def", "the deletion as the alternative base and the variant pos vcfAlt = refSeq[0]+altAllele", "if needed. :param check: Whether or not to throw error if the provided", "% (fileName) pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout return(int(pipe.read().decode(\"utf-8\").lstrip(\" \").split(\" \")[0])) def UpdateProgress(i, n,", "line.rstrip('\\n').split('\\t') # Genomic Position chrom, pos, id = line[1], line[2], line[10] # Get", "base directly preceding the deletion as the alternative base and the variant pos", "refSeq == ref: return(True) else: print('ERROR: May not be proper reference genome') print('ERROR:", "INS %s\" % ('\\t'.join(line))) sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Create", "genome') print('ERROR: Improper reference. Found %s at %s. Reference genome shows %s' %", "import numpy as np def OptionParsing(): usage = 'usage: %prog -i <*.maf> -o", "(single fasta file).\") sys.exit() else: pass return (options, parser) def fn_timer(function): ''' Use", "+ \";MAF_ref_context=\" + line[15].upper() + \";MAF_Genome_Change=\" + line[ 14] + \";MAF_Variant_Type=\" + variantType", "''' Obtain reference sequence and perform check if needed. 
:param check: Whether or", "original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Genome_Change,Number=1,Type=String,Description=\\\"Genome change in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Type,Number=1,Type=String,Description=\\\"Variant type (SNP,INS,DEL) in", "open(Options.maf,'r') as inFile: i = 0 for line in inFile: if i ==", "the base that precedes the deletion. refSeq = SamtoolsFaidx(Options.refGenome, chrom + \":\" +", "informative. :return: None ''' sys.stdout.write('\\r') j = (i + 1) / n sys.stdout.write(\"[%-20s]", "file.\") vcfFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf') vcfFileSorted = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.head.maf','.sorted.vcf.gz') os.system(\"cat %s", "return function_timer def UpdateProgressGetN(fileName): if fileName[len(fileName)-1]==\"z\": cmd = \"gzip -cd %s | wc", "in a]) # sys.exit(\"Problem here\") elif i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position')", "= subprocess.Popen(['samtools','faidx',refGenome, genomicPos], stdout=subprocess.PIPE) proc.wait() outInfo = proc.stdout.readlines() refSeq = ''.join([line.decode('utf-8').rstrip('\\n') for line", "formatted information if mutType == '': mutType = '.' if variantType == '':", "preceding base for the DEL refAnchorPos = str(int(pos) - 1) # Fetch the", "chrom, pos, id = line[1], line[2], line[10] # Get rs ID rsid =", "QUAL == 'NA' or QUAL == '': QUAL = '.' 
if tAllele1 ==", "ialt_reads = line[36] ref_reads = line[39] alt_reads = line[38] reportedVAF = line[28] i_t_vaf", "alt_reads == 'NA' and reportedVAF == 'NA': with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n')", "line[28] # Get phasing information and determine reads for vaf==1 if ref_reads ==", "or alt_reads == 'NA' and reportedVAF == '1': GT = \"1/1\" # Appears", "preceding base for the DEL refAnchorPos = str(int(pos)-1) # Fetch the base that", "os.system(\"cat %s | awk '$1 ~ /^#/ {print $0;next} {print $0 | \\\"LC_ALL=C", "Problem processing DEL %s\"%('\\t'.join(line))) sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Create", "now = datetime.datetime.now() ioObject.write(\"##fileformat=VCFv4.2\\n\") ioObject.write(\"##fileDate=%s\\n\"%(now.date())) ioObject.write(\"##source=maf2vcf.py\\n\") ioObject.write(\"##reference=%s\\n\"%(Options.refGenome)) ioObject.write(\"##sampleColumns=Normal.Tumor\\n\") ioObject.write(\"##INFO=<ID=MAF_Hugo_Symbol,Number=1,Type=String,Description=\\\"HUGO Symbol in original MAF", "import os import sys from optparse import OptionParser import subprocess from functools import", "/ float(total_reads)) elif i_t_vaf != \"\" and i_t_vaf != \"NA\" and ref_reads ==", "-l %s\" % (fileName) pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout return(int(pipe.read().decode(\"utf-8\").lstrip(\" \").split(\" \")[0])) def", "vcfPos, rsid, vcfRef, vcfAlt, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return (lineOut) def", "the MAF file. vcfRef = refSeq # VCF has base directly preceding the", "if variantType==\"SNP\": linetowrite = processSNP(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options)", "field INFO = \"MAF_Hugo_Symbol=\" + line[0] + \";MAF_ref_context=\" + line[15].upper() + \";MAF_Genome_Change=\" +", "processing. 
linetowrite = None if variantType==\"SNP\": linetowrite = processSNP(line, chrom, pos, rsid, mutType,", "a = np.arange(fileLength) np.random.shuffle(a) a = list(a[:int(fileLength*n)]) i = 0 count = 0", "Default=False\") (options, args) = parser.parse_args() if options.maf is None or options.outDir is None", "throw error if the provided reference matches :param refGenome: Reference Fasta file :param", "is used. Random sampling is employed to ensure proper reference is used. Will", "and ialt_reads=='NA': vaf=i_t_vaf GT=\"./.\" ref_reads = '.' alt_reads = '.' total_reads = '.'", "repr(round(int(alt_reads) / float(total_reads), 4)) if vaf != '1.' and strand==\"+\" or strand==\"-\": GT=\"0|1\"", "alt_reads = '.' total_reads = '.' elif ( i_t_vaf == \"\" or i_t_vaf", "i: Current Step :param n: Total number of steps. :param DisplayText: A string", "function. ''' @wraps(function) def function_timer(*args, **kwargs): t0 = time.time() result = function(*args, **kwargs)", "alternative allele (germline unlikely since it is called w.r.t normal?) vaf = reportedVAF", "return(int(pipe.read().decode(\"utf-8\").lstrip(\" \").split(\" \")[0])) def UpdateProgress(i, n, DisplayText): ''' Prints a progress bar where", "if variantType == '': variantType = '.' # Determine type of variant to", "for read in [alt_reads, ialt_reads] if read != \"NA\"][0] total_reads = str(int(ref_reads) +", "# Normal variant field if anything if line[41]==\"NA\": normalGenotype = \".:.,.:.:.\" else: normalGenotype", "Fetched reference sequence. ''' proc = subprocess.Popen(['samtools','faidx',refGenome, genomicPos], stdout=subprocess.PIPE) proc.wait() outInfo = proc.stdout.readlines()", "float(total_reads)) elif i_t_vaf != \"\" and i_t_vaf != \"NA\" and ref_reads == 'NA'", "for read in [ref_reads, iref_reads] if read != \"NA\"][0] alt_reads = [read for", "out that is informative. 
:return: None ''' sys.stdout.write('\\r') j = (i + 1)", "depth across this site\\\">\\n\") ioObject.write(\"##FORMAT=<ID=VF,Number=1,Type=Float,Description=\\\"Variant Allele Frequency.\\\">\\n\") ioObject.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\t%s\\t%s\\n\"%(normalID,tumorID)) @fn_timer def ProcessFile(Options): n =", "return (lineOut) def CreateVCFLine(line, errorFile, Options): line = line.rstrip('\\n').split('\\t') # Genomic Position chrom,", "base in the insertion in MAF vcfRef = refSeq[0] # VCF has base", "'+' and strand != '-'): with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') if Options.verbose:", "''' print(\"INFO: Verifying maf file.\") if fileLength > 200: n=0.02 else: n=1. a", "- 1].replace('.maf', '.ignoredSNVs.maf') with open(errorFile, 'w') as errorOut: errorOut.write(header) CreateHeader(outVCF, Options, tumorID, normalID)", "you want to print out that is informative. :return: None ''' sys.stdout.write('\\r') j", ":param ref: Reference sequence to compare to fetched sequence. :return: Fetched reference sequence.", "= np.arange(fileLength) np.random.shuffle(a) a = list(a[:int(fileLength*n)]) i = 0 count = 0 for", "Get phasing information and determine reads for vaf==1 if ref_reads == 'NA' or", "'NA': vaf = i_t_vaf GT = \"./.\" ref_reads = '.' alt_reads = '.'", "much of what I'm choosing to filter out as possible... if Options.verbose: print(\"WARNING:", ":param refGenome: Reference Fasta file :param genomicPos: Genomic Position of interest. 
:param ref:", "ioObject.write(\"##sampleColumns=Normal.Tumor\\n\") ioObject.write(\"##INFO=<ID=MAF_Hugo_Symbol,Number=1,Type=String,Description=\\\"HUGO Symbol in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_ref_context,Number=1,Type=String,Description=\\\"Reference context in original MAF file.\\\">\\n\")", "as errerOut: errerOut.write('\\t'.join(line)+'\\n') if Options.verbose: print(\"WARNING: %s\" % '\\t'.join(line)) return(None) # Simple SNV", "UpdateProgress(i, n, \"Processing Maf File\") if line.startswith('Hugo_Symbol Chromosome Start_position'): count+=1 i += 1", "sequence. :return: Fetched reference sequence. ''' proc = subprocess.Popen(['samtools','faidx',refGenome, genomicPos], stdout=subprocess.PIPE) proc.wait() outInfo", "'NA'): GT = \"0/1\" ref_reads = [read for read in [ref_reads, iref_reads] if", "is not None: outVCF.write('\\t'.join(linetoWrite)+'\\n') print('') print(\"INFO: Sorting vcf file.\") vcfFile = Options.outDir +", "\";MAF_ref_context=\" + line[15].upper() + \";MAF_Genome_Change=\" + line[14] + \";MAF_Variant_Type=\" + variantType + \";MAF_Variant_Classification=\"", "@wraps(function) def function_timer(*args, **kwargs): t0 = time.time() result = function(*args, **kwargs) t1 =", "normalGenotype = \".:.,.:.:%s\" % (line[41]) lineOut = [chrom, vcfPos, rsid, vcfRef, vcfAlt, QUAL,", "n=0.02 else: n=1. a = np.arange(fileLength) np.random.shuffle(a) a = list(a[:int(fileLength*n)]) i = 0", "samples the file to ensure proper reference file is used. Random sampling is", "total_reads, vaf]) # Last check for interesting but unresolved MAF line if (ref", "or alt_reads == 'NA' and reportedVAF == 'NA': with open(errorFile, 'a') as errerOut:", "help=\"Use this flag to verify reference matching to maf file. Default=False\") parser.add_option('-v', '--verbose',", "Get read information iref_reads = line[37] ialt_reads = line[36] ref_reads = line[39] alt_reads", "refAnchorPos = str(int(pos)-1) # Fetch the base that precedes the deletion. 
refSeq =", "pos, id = line[1], line[2], line[10] # Get rs ID rsid = line[10]", "variantType, strand, errorFile, Options) elif variantType==\"INS\": linetowrite = processINS(line, chrom, pos, rsid, mutType,", "= refSeq # VCF has base directly preceding the deletion as the alternative", "of more than 200 variants. :param mafFile: Input mafFile object (opened) :param Options:", "\"\" or i_t_vaf == \"NA\") and ref_reads == 'NA' and iref_reads == 'NA'", "verify reference matching to maf file. Default=False\") parser.add_option('-v', '--verbose', dest='verbose', default=False, action='store_true', help=\"Use", "line[6] if variantClass != \"INS\" and variantClass != \"TNP\" and variantClass !=\"ONP\": toContinue", "needed. :param check: Whether or not to throw error if the provided reference", "altAllele = tAllele1 # Obtain the reference sequence + 1 preceding base for", "interest. :return: A function to wrap around a function. ''' @wraps(function) def function_timer(*args,", "original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=DCC_Project_Code,Number=1,Type=String,Description=\\\"DCC Project Code in original MAF file.\\\">\\n\") ioObject.write(\"##FORMAT=<ID=GT,Number=1,Type=String,Description=\\\"Genotype\\\">\\n\") ioObject.write(\"##FORMAT=<ID=AD,Number=2,Type=Integer,Description=\\\"Allelic depths", ":return: None ''' print(\"INFO: Verifying maf file.\") if fileLength > 200: n=0.02 else:", "and ALT(s) in the order listed\\\">\\n\") ioObject.write(\"##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\\\"Total read depth across this site\\\">\\n\") ioObject.write(\"##FORMAT=<ID=VF,Number=1,Type=Float,Description=\\\"Variant", "line[0] + \";MAF_ref_context=\" + line[15].upper() + \";MAF_Genome_Change=\" + line[ 14] + \";MAF_Variant_Type=\" +", "converted.\") parser.add_option('-o', '--output_dir', dest=\"outDir\", default=None, help=\"Output directory for .vcf file\") parser.add_option('-r', '--ref_genome', dest=\"refGenome\",", "base plus the reported deletion in 
the MAF file. vcfRef = refSeq #", "chrom, pos, rsid, mutType, variantType, strand, errorFile, Options) elif variantType==\"DEL\": linetowrite = processDEL(line,", "= refSeq[0]+altAllele vcfPos = refAnchorPos # Get read information iref_reads = line[37] ialt_reads", "function_timer def UpdateProgressGetN(fileName): if fileName[len(fileName)-1]==\"z\": cmd = \"gzip -cd %s | wc -l\"", "+= 1 else: i += 1 linetoWrite = CreateVCFLine(line, errorFile, Options) if linetoWrite", "and determine reads for vaf==1 if (ref_reads != 'NA' or iref_reads!='NA') and (alt_reads", "elif i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False: print(\"\") print(\"ERROR: No", "= line[6] if variantClass != \"INS\" and variantClass != \"TNP\" and variantClass !=\"ONP\":", "== True: toContinue = True else: sys.exit(\"What the fuck\") i+=1 print('') return(toContinue) def", "-l\" % (fileName) pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout else: cmd = \"wc -l", "tAllele1 refAllele = tAllele2 else: altAllele = tAllele2 refAllele = tAllele1 # Obtain", "iref_reads] if read != \"NA\"][0] alt_reads = [read for read in [alt_reads, ialt_reads]", "progress bar where appropriate. :param i: Current Step :param n: Total number of", "'.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut) def processDEL(line, chrom, pos, rsid, mutType, variantType,", "default=None, help=\"Output directory for .vcf file\") parser.add_option('-r', '--ref_genome', dest=\"refGenome\", default=\"/Users/schencro/Desktop/Bioinformatics_Tools/Ref_Genomes/Ensembl/GRCh37.75/GRCh37.75.fa\", help=\"Reference genome to", "!= 'NA'): GT = \"0/1\" ref_reads = [read for read in [ref_reads, iref_reads]", "+= 1 linetoWrite = CreateVCFLine(line, errorFile, Options) if linetoWrite is not None: outVCF.write('\\t'.join(linetoWrite)+'\\n')", "called w.r.t normal?) 
vaf = reportedVAF # Sets VAF equal to 1 if", "elif i_t_vaf!=\"\" and i_t_vaf!=\"NA\" and ref_reads == 'NA' and iref_reads=='NA' and alt_reads ==", "datetime.datetime.now() ioObject.write(\"##fileformat=VCFv4.2\\n\") ioObject.write(\"##fileDate=%s\\n\"%(now.date())) ioObject.write(\"##source=maf2vcf.py\\n\") ioObject.write(\"##reference=%s\\n\"%(Options.refGenome)) ioObject.write(\"##sampleColumns=Normal.Tumor\\n\") ioObject.write(\"##INFO=<ID=MAF_Hugo_Symbol,Number=1,Type=String,Description=\\\"HUGO Symbol in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_ref_context,Number=1,Type=String,Description=\\\"Reference", "if k==i]) # if checkIt==1: UpdateProgress(count, len(a), \"INFO: Verifying maf file\") count+=1 line", "sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Last check for interesting but", "the deletion. refSeq = SamtoolsFaidx(Options.refGenome, chrom + \":\" + refAnchorPos + \"-\" +", "if (ref_reads != 'NA' or iref_reads != 'NA') and (alt_reads != 'NA' or", "INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut) def processINS(line, chrom, pos, rsid, mutType, variantType, strand,", "maf file, output directory, and reference genome (single fasta file).\") sys.exit() else: pass", "if mutType == '': mutType = '.' if variantType == '': variantType =", "(if SNP) in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=DCC_Project_Code,Number=1,Type=String,Description=\\\"DCC Project Code in original MAF file.\\\">\\n\")", "Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf') vcfFileSorted = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.head.maf','.sorted.vcf.gz') os.system(\"cat %s | awk '$1 ~ /^#/", ":return: Fetched reference sequence. 
''' proc = subprocess.Popen(['samtools','faidx',refGenome, genomicPos], stdout=subprocess.PIPE) proc.wait() outInfo =", "sys.exit(\"ERROR: Problem in id column\") # Strand Information strand = line[4] # Variant", "= ''.join([line.decode('utf-8').rstrip('\\n') for line in outInfo[1:]]) if check: if refSeq == ref: return(True)", "<ref.fa>' parser = OptionParser(usage) parser.add_option('-i', '--input_maf', dest=\"maf\", default=None, help=\".maf file to be converted.\")", "\";MAF_Variant_Classification=\" + mutType + \";DCC_Project_Code=\" + \\ line[44] # Normal variant field if", "else: print('ERROR: May not be proper reference genome') print('ERROR: Improper reference. Found %s", "in outInfo[1:]]) if check: if refSeq == ref: return(True) else: print('ERROR: May not", "time.time() print (\"INFO: Total time running %s: %s minutes\" % (function.__name__, str(round((t1-t0)/60.,2))) )", "line i+=1 tumorID = toPullIDs[12] normalID = toPullIDs[13] count = 0 i =", "compare to fetched sequence. :return: Fetched reference sequence. ''' proc = subprocess.Popen(['samtools','faidx',refGenome, genomicPos],", "1: toPullIDs = line.rstrip('\\n').split('\\t') break else: header = line i+=1 tumorID = toPullIDs[12]", "'.' total_reads = '.' vaf = '.' else: sys.exit(\"ERROR: Problem processing INS %s\"", "a]) # sys.exit(\"Problem here\") elif i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position') ==", "line in mafFile: if i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False:", "sequence + 1 preceding base for the DEL refAnchorPos = str(int(pos)-1) # Fetch", "mutType = line[5] variantType = line[6] # Create proper vcf formatted information if", "is the preceding base plus the reported deletion in the MAF file. 
vcfRef", "Start_position') == False: # checkIt = len([k for k in a if k==i])", "# print(line) # sys.exit(\"ERROR: Malformed MAF entry.\") return(linetowrite) def CreateHeader(ioObject, Options, tumorID, normalID):", "print('ERROR: May not be proper reference genome') print('ERROR: Improper reference. Found %s at", "line[5] variantType = line[6] # Create proper vcf formatted information if mutType ==", "rsid, vcfRef, vcfAlt, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut) def processINS(line, chrom,", "depths of REF and ALT(s) in the order listed\\\">\\n\") ioObject.write(\"##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\\\"Total read depth across", "= time.time() print (\"INFO: Total time running %s: %s minutes\" % (function.__name__, str(round((t1-t0)/60.,2)))", "= subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout return(int(pipe.read().decode(\"utf-8\").lstrip(\" \").split(\" \")[0])) def UpdateProgress(i, n, DisplayText): ''' Prints", "duplicitious, but I explicityly want to know as much of what I'm choosing", "%s | wc -l\" % (fileName) pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout else: cmd", "MAF file. vcfRef = refSeq # VCF has base directly preceding the deletion", "= str(int(alt_reads) / float(total_reads)) elif i_t_vaf != \"\" and i_t_vaf != \"NA\" and", "Current Step :param n: Total number of steps. :param DisplayText: A string that", "- 1) # Fetch the base that precedes the deletion. refSeq = SamtoolsFaidx(Options.refGenome,", "the preceding base plus the reported deletion in the MAF file. vcfRef =", "= processDEL(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options) elif variantType==\"INS\": linetowrite", "= \"gzip -cd %s | wc -l\" % (fileName) pipe = subprocess.Popen(cmd, shell=True,", "in mafFile: if i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False: #", "on verbose mode. 
Default=False\") (options, args) = parser.parse_args() if options.maf is None or", "file.\") if fileLength > 200: n=0.02 else: n=1. a = np.arange(fileLength) np.random.shuffle(a) a", "or variantType==\"ONP\": with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') else: # This may seem", "import OptionParser import subprocess from functools import wraps import datetime import time import", "action='store_true', help=\"Use this flag to verify reference matching to maf file. Default=False\") parser.add_option('-v',", "for the DEL refAnchorPos = str(int(pos)-1) # Fetch the base that precedes the", "= '.' elif rsid.startswith(\"rs\") == False: if Options.verbose: print(\"ERROR: %s\"%(line)) sys.exit(\"ERROR: Problem in", "= line.rstrip('\\n').split('\\t') break else: header = line i+=1 tumorID = toPullIDs[12] normalID =", "and alt_reads == 'NA' and ialt_reads=='NA': vaf=i_t_vaf GT=\"./.\" ref_reads = '.' alt_reads =", "to wrap around a function. ''' @wraps(function) def function_timer(*args, **kwargs): t0 = time.time()", "\"-\" + line[3] ref = line[7] mutType = line[5] variantClass = line[6] if", "ref) if count == len(a): print('') return(toContinue) # else: # print(checkIt) # print(line)", "total_reads = str(int(ref_reads) + int(alt_reads)) vaf = str(int(alt_reads) / float(total_reads)) elif i_t_vaf !=", "if linetoWrite is not None: outVCF.write('\\t'.join(linetoWrite)+'\\n') print('') print(\"INFO: Sorting vcf file.\") vcfFile =", "normalGenotype, sampleField] return(lineOut) def processINS(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options):", "read :return: None ''' print(\"INFO: Verifying maf file.\") if fileLength > 200: n=0.02", "pass return (options, parser) def fn_timer(function): ''' Use this as a wrapper at", "\"-\" + line[3], check=False) # VCF reference is the preceding base in the", "is the preceding base in the insertion in MAF vcfRef = refSeq[0] #", "== '': rsid = '.' 
elif rsid.startswith(\"rs\") == False: if Options.verbose: print(\"ERROR: %s\"%(line))", "'': QUAL = '.' if tAllele1 == '-': altAllele = tAllele2 else: altAllele", "!= 'NA' or iref_reads!='NA') and (alt_reads != 'NA' or ialt_reads!='NA'): GT=\"0/1\" ref_reads =", "''' Prints a progress bar where appropriate. :param i: Current Step :param n:", "== 'NA' or QUAL == '': QUAL = '.' if ref == tAllele1:", "vcfRef, vcfAlt, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return (lineOut) def CreateVCFLine(line, errorFile,", "not be proper reference genome') print('ERROR: Improper reference. Found %s at %s. Reference", "errorFile, Options): ref = line[7] tAllele1 = line[8] # Normal Allele tAllele2 =", "str(int(pos) - 1) # Fetch the base that precedes the deletion. refSeq =", "1 linetoWrite = CreateVCFLine(line, errorFile, Options) if linetoWrite is not None: outVCF.write('\\t'.join(linetoWrite)+'\\n') print('')", "ialt_reads != 'NA'): GT = \"0/1\" ref_reads = [read for read in [ref_reads,", "= True else: sys.exit(\"What the fuck\") i+=1 print('') return(toContinue) def processSNP(line, chrom, pos,", "!=\"ONP\": toContinue = SamtoolsFaidx(Options.refGenome, genomicPos, ref) if count == len(a): print('') return(toContinue) #", "n, \"Processing Maf File\") if line.startswith('Hugo_Symbol Chromosome Start_position'): count+=1 i += 1 else:", "strand, errorFile, Options) elif variantType==\"DEL\": linetowrite = processDEL(line, chrom, pos, rsid, mutType, variantType,", "line.rstrip('\\n').split('\\t') genomicPos = line[1] + \":\" + line[2] + \"-\" + line[3] ref", "1].replace('.maf', '.ignoredSNVs.maf') with open(errorFile, 'w') as errorOut: errorOut.write(header) CreateHeader(outVCF, Options, tumorID, normalID) for", "or i_t_vaf==\"NA\") and ref_reads == 'NA' and iref_reads=='NA' and alt_reads == 'NA' and", "import datetime import time import numpy as np def OptionParsing(): usage = 'usage:", "fileName[len(fileName)-1]==\"z\": cmd = \"gzip -cd %s | wc -l\" % 
(fileName) pipe =", "it is called w.r.t normal?) vaf = reportedVAF # Sets VAF equal to", "else: n=1. a = np.arange(fileLength) np.random.shuffle(a) a = list(a[:int(fileLength*n)]) i = 0 count", "(ref_reads != 'NA' or iref_reads != 'NA') and (alt_reads != 'NA' or ialt_reads", "processing INS %s\" % ('\\t'.join(line))) sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) #", "line[4] # Variant Classification/Type (Type is SNP, INS, DEL, etc.) mutType = line[5]", "| wc -l\" % (fileName) pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout else: cmd =", "# checkIt = len([k for k in a if k==i]) # if checkIt==1:", "reference is the preceding base in the insertion in MAF vcfRef = refSeq[0]", "if i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False: # checkIt =", "sort -k1,1 -k2,2n\\\"}' | gzip > %s\"%(vcfFile, vcfFileSorted)) os.system(\"rm %s\"%(vcfFile)) os.system(\"gzip %s\"%(errorFile)) def", "%s\" % ('=' * int(20 * j), 100 * j, DisplayText)) sys.stdout.flush() def", "print('') print(\"INFO: Sorting vcf file.\") vcfFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf') vcfFileSorted = Options.outDir", "and line.startswith('Hugo_Symbol Chromosome Start_position') == False: # checkIt = len([k for k in", "line.startswith('Hugo_Symbol Chromosome Start_position') == False: print(\"\") print(\"ERROR: No header found in maf file.\")", "ialt_reads == 'NA': GT = './.' ref_reads = '.' alt_reads = '.' total_reads", "= UpdateProgressGetN(Options.maf) if Options.spotcheck: with open(Options.maf, 'r') as inFile: SpotCheckProperReference(inFile, Options, n) with", "tAllele2 = line[9] # Alt Allele Typically QUAL = line[42] if QUAL ==", "default=False, action='store_true', help=\"Use this flag to turn on verbose mode. 
Default=False\") (options, args)", "and determine reads for vaf==1 if ref_reads == 'NA' or alt_reads == 'NA'", "id = line[1], line[2], line[10] # Get rs ID rsid = line[10] if", "CreateHeader(ioObject, Options, tumorID, normalID): now = datetime.datetime.now() ioObject.write(\"##fileformat=VCFv4.2\\n\") ioObject.write(\"##fileDate=%s\\n\"%(now.date())) ioObject.write(\"##source=maf2vcf.py\\n\") ioObject.write(\"##reference=%s\\n\"%(Options.refGenome)) ioObject.write(\"##sampleColumns=Normal.Tumor\\n\") ioObject.write(\"##INFO=<ID=MAF_Hugo_Symbol,Number=1,Type=String,Description=\\\"HUGO", "information and determine reads for vaf==1 if ref_reads == 'NA' or alt_reads ==", "# Create proper vcf formatted information if mutType == '': mutType = '.'", "i = 0 count = 0 for line in mafFile: if i !=", "deletion in the MAF file. vcfRef = refSeq # VCF has base directly", "vaf = '.' else: sys.exit(\"ERROR: Problem processing INS %s\" % ('\\t'.join(line))) sampleField =", "'.' total_reads = alt_reads else: alt_reads = '.' 
total_reads = ref_reads sampleField =", "in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_ref_context,Number=1,Type=String,Description=\\\"Reference context in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Genome_Change,Number=1,Type=String,Description=\\\"Genome change in", "sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Tossing these very strange mutations", "alternative base and the variant pos vcfAlt=refSeq[0] vcfPos=refAnchorPos # Get read information iref_reads", "= Options.outDir + Options.maf.split('/')[len(Options.maf.split('/')) - 1].replace('.maf', '.ignoredSNVs.maf') with open(errorFile, 'w') as errorOut: errorOut.write(header)", "'NA' or iref_reads!='NA') and (alt_reads != 'NA' or ialt_reads!='NA'): GT=\"0/1\" ref_reads = [read", "# Sets VAF equal to 1 if ref_reads == 'NA': ref_reads = '.'", "tAllele2 refAllele = tAllele1 # Obtain the reference sequence + 1 preceding base", "Reference sequence to compare to fetched sequence. :return: Fetched reference sequence. ''' proc", "errerOut: errerOut.write('\\t'.join(line)+'\\n') if Options.verbose: print(\"WARNING: %s\" % '\\t'.join(line)) return(None) # Simple SNV cases", "(line[41]) lineOut = [chrom, vcfPos, rsid, vcfRef, vcfAlt, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype,", "False: print(\"\") print(\"ERROR: No header found in maf file.\") elif line.startswith('Hugo_Symbol Chromosome Start_position')", "checkIt==1: UpdateProgress(count, len(a), \"INFO: Verifying maf file\") count+=1 line = line.rstrip('\\n').split('\\t') genomicPos =", "if (ref != tAllele1 and ref != tAllele2) or (strand != '+' and", "for the DEL refAnchorPos = str(int(pos) - 1) # Fetch the base that", "== 'NA' and ialt_reads == 'NA': GT = './.' ref_reads = '.' 
alt_reads", "print('') with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') # print(line) # sys.exit(\"ERROR: Malformed MAF", "+ \";MAF_ref_context=\" + line[15].upper() + \";MAF_Genome_Change=\" + line[14] + \";MAF_Variant_Type=\" + variantType +", "!= 'NA' or ialt_reads!='NA'): GT=\"0/1\" ref_reads = [read for read in [ref_reads, iref_reads]", "# else: # print(checkIt) # print(line) # print([k for k in a]) #", "sys.stdout.write(\"[%-20s] %d%%\\t INFO: %s\" % ('=' * int(20 * j), 100 * j,", "anything if line[41] == \"NA\": normalGenotype = \".:.,.:.:.\" else: normalGenotype = \".:.,.:.:%s\" %", "print(\"ERROR: %s\"%(line)) sys.exit(\"ERROR: Problem in id column\") # Strand Information strand = line[4]", "(fileName) pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout return(int(pipe.read().decode(\"utf-8\").lstrip(\" \").split(\" \")[0])) def UpdateProgress(i, n, DisplayText):", "'.' elif (i_t_vaf==\"\" or i_t_vaf==\"NA\") and ref_reads == 'NA' and iref_reads=='NA' and alt_reads", "line[9] # Alt Allele Typically QUAL = line[42] if QUAL == 'None' or", "variantType==\"INS\": linetowrite = processINS(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options) elif", "provided reference matches :param refGenome: Reference Fasta file :param genomicPos: Genomic Position of", "for k in a]) # sys.exit(\"Problem here\") elif i != 0 and line.startswith('Hugo_Symbol", "this as a wrapper at the top of any function you want to", "Randomly samples the file to ensure proper reference file is used. 
Random sampling", "Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.head.maf','.sorted.vcf.gz') os.system(\"cat %s | awk '$1 ~ /^#/ {print $0;next} {print $0 |", "the alternative base and the variant pos vcfAlt = refSeq[0]+altAllele vcfPos = refAnchorPos", "maf file\") count+=1 line = line.rstrip('\\n').split('\\t') genomicPos = line[1] + \":\" + line[2]", "!= 'NA' or iref_reads != 'NA') and (alt_reads != 'NA' or ialt_reads !=", "file being read :return: None ''' print(\"INFO: Verifying maf file.\") if fileLength >", "= parser.parse_args() if options.maf is None or options.outDir is None or options.refGenome is", "or options.refGenome is None: print(\"ERROR: Please include arguments for maf file, output directory,", "is informative. :return: None ''' sys.stdout.write('\\r') j = (i + 1) / n", "options.outDir is None or options.refGenome is None: print(\"ERROR: Please include arguments for maf", "time information about. :param function: Function of interest. :return: A function to wrap", "context in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Genome_Change,Number=1,Type=String,Description=\\\"Genome change in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Type,Number=1,Type=String,Description=\\\"Variant type", "Found %s at %s. Reference genome shows %s' % (ref, genomicPos, refSeq)) sys.exit()", "(lineOut) def CreateVCFLine(line, errorFile, Options): line = line.rstrip('\\n').split('\\t') # Genomic Position chrom, pos,", "else: cmd = \"wc -l %s\" % (fileName) pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout", "reference. Found %s at %s. 
Reference genome shows %s' % (ref, genomicPos, refSeq))", "the variant pos vcfAlt=refSeq[0] vcfPos=refAnchorPos # Get read information iref_reads = line[37] ialt_reads", "@fn_timer def ProcessFile(Options): n = UpdateProgressGetN(Options.maf) if Options.spotcheck: with open(Options.maf, 'r') as inFile:", "Alt Allele Typically QUAL = line[42] if QUAL == 'None' or QUAL ==", "of variant to continue processing. linetowrite = None if variantType==\"SNP\": linetowrite = processSNP(line,", "= tAllele1 refAllele = tAllele2 else: altAllele = tAllele2 refAllele = tAllele1 ref_reads", "os.system(\"gzip %s\"%(errorFile)) def main(): print(\"INFO: Processing MAF file.\") FilePath = os.path.dirname(os.path.abspath(__file__)) (Options, Parser)", "Normal variant field if anything if line[41]==\"NA\": normalGenotype = \".:.,.:.:.\" else: normalGenotype =", "base for the DEL refAnchorPos = str(int(pos)-1) # Fetch the base that precedes", "line[37] ialt_reads = line[36] ref_reads = line[39] alt_reads = line[38] reportedVAF = line[28]", "args) = parser.parse_args() if options.maf is None or options.outDir is None or options.refGenome", "proper reference is used. Will spot check 2% of a file of more", "ref = line[7] tAllele1 = line[8] # Normal Allele Typically tAllele2 = line[9]", "genomicPos], stdout=subprocess.PIPE) proc.wait() outInfo = proc.stdout.readlines() refSeq = ''.join([line.decode('utf-8').rstrip('\\n') for line in outInfo[1:]])", "ref = line[7] mutType = line[5] variantClass = line[6] if variantClass != \"INS\"", "normalGenotype = \".:.,.:.:.\" else: normalGenotype = \".:.,.:.:%s\"%(line[41]) # Final vcf line out lineOut", "(ref, genomicPos, refSeq)) sys.exit() return(None) else: return(refSeq) def SpotCheckProperReference(mafFile, Options, fileLength): ''' Randomly", "open(Options.maf, 'r') as inFile: SpotCheckProperReference(inFile, Options, n) with open(Options.maf,'r') as inFile: i =", "= \"./.\" ref_reads = '.' alt_reads = '.' total_reads = '.' 
elif (", "\"NA\"][0] total_reads = str(int(ref_reads) + int(alt_reads)) vaf = str(int(alt_reads) / float(total_reads)) elif i_t_vaf", "<directory> -r <ref.fa>' parser = OptionParser(usage) parser.add_option('-i', '--input_maf', dest=\"maf\", default=None, help=\".maf file to", "header found in maf file.\") elif line.startswith('Hugo_Symbol Chromosome Start_position') == True: toContinue =", "str(int(alt_reads) / float(total_reads)) elif i_t_vaf != \"\" and i_t_vaf != \"NA\" and ref_reads", "information and determine reads for vaf==1 if (ref_reads != 'NA' or iref_reads!='NA') and", "default=\"/Users/schencro/Desktop/Bioinformatics_Tools/Ref_Genomes/Ensembl/GRCh37.75/GRCh37.75.fa\", help=\"Reference genome to be used for maf2vcf conversion.\") parser.add_option('-s', '--spotCheckMaf', dest='spotcheck', default=False,", "refAnchorPos = str(int(pos) - 1) # Fetch the base that precedes the deletion.", "= line[39] alt_reads = line[38] reportedVAF = line[28] # Get phasing information and", "reportedVAF == 'NA': with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') if Options.verbose: print(\"WARNING: %s\"", "GT = \"./.\" ref_reads = '.' alt_reads = '.' total_reads = '.' 
elif", "Chromosome Start_position') == True: toContinue = True else: sys.exit(\"What the fuck\") i+=1 print('')", "refAllele, altAllele, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut) def processDEL(line, chrom, pos,", "= line[7] mutType = line[5] variantClass = line[6] if variantClass != \"INS\" and", "+ line[3], check=False) if refSeq[1:] != altAllele: print(\"ERROR: Deletion alternative allele does not", "linetowrite = processDEL(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options) elif variantType==\"INS\":", "Problem processing INS %s\" % ('\\t'.join(line))) sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf])", "the provided reference matches :param refGenome: Reference Fasta file :param genomicPos: Genomic Position", "/ float(total_reads), 4)) if vaf != '1.' and strand==\"+\" or strand==\"-\": GT=\"0|1\" else:", "ref != tAllele2) or (strand != '+' and strand != '-'): with open(errorFile,", "!= \"NA\"][0] total_reads = str(int(ref_reads) + int(alt_reads)) vaf = str(int(alt_reads) / float(total_reads)) elif", "appropriate. :param i: Current Step :param n: Total number of steps. :param DisplayText:", "variantType, strand, errorFile, Options): ref = line[7] tAllele1 = line[8] # Normal Allele", "that is informative. 
:return: None ''' sys.stdout.write('\\r') j = (i + 1) /", "vcf file.\") vcfFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf') vcfFileSorted = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.head.maf','.sorted.vcf.gz') os.system(\"cat", "= line[36] ref_reads = line[39] alt_reads = line[38] reportedVAF = line[28] i_t_vaf =", "= line[1], line[2], line[10] # Get rs ID rsid = line[10] if rsid", "vaf==1 if (ref_reads != 'NA' or iref_reads != 'NA') and (alt_reads != 'NA'", "0 with open(Options.maf, 'r') as inFile: with open(Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf'), 'w') as outVCF:", "processDEL(line, chrom, pos, rsid, mutType, variantType, strand, errorFile, Options) elif variantType==\"INS\": linetowrite =", "else: sys.exit(\"What the fuck\") i+=1 print('') return(toContinue) def processSNP(line, chrom, pos, rsid, mutType,", "determine reads for vaf==1 if (ref_reads != 'NA' or iref_reads!='NA') and (alt_reads !=", "Typically QUAL = line[42] if QUAL == 'None' or QUAL == 'NA' or", "Options: Parser Options :param fileLength: Length of the file being read :return: None", "i_t_vaf == \"NA\") and ref_reads == 'NA' and iref_reads == 'NA' and alt_reads", "vaf==1 if ref_reads == 'NA' or alt_reads == 'NA' and reportedVAF == '1':", "as inFile: with open(Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf'), 'w') as outVCF: errorFile = Options.outDir +", "ioObject.write(\"##FORMAT=<ID=AD,Number=2,Type=Integer,Description=\\\"Allelic depths of REF and ALT(s) in the order listed\\\">\\n\") ioObject.write(\"##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\\\"Total read depth", "if line[41] == \"NA\": normalGenotype = \".:.,.:.:.\" else: normalGenotype = \".:.,.:.:%s\" % (line[41])", "\"NA\") and ref_reads == 'NA' and iref_reads == 'NA' and alt_reads == 'NA'", "processSNP(line, chrom, pos, rsid, mutType, 
variantType, strand, errorFile, Options): ref = line[7] tAllele1", "be used for maf2vcf conversion.\") parser.add_option('-s', '--spotCheckMaf', dest='spotcheck', default=False, action='store_true', help=\"Use this flag", "pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout else: cmd = \"wc -l %s\" % (fileName)", "with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') if Options.verbose: print(\"WARNING: %s\" % '\\t'.join(line)) return(None)", "= str(int(ref_reads) + int(alt_reads)) vaf = str(int(alt_reads) / float(total_reads)) elif i_t_vaf != \"\"", "('=' * int(20 * j), 100 * j, DisplayText)) sys.stdout.flush() def SamtoolsFaidx(refGenome, genomicPos,", "= '.' total_reads = '.' vaf = '.' else: sys.exit(\"ERROR: Problem processing INS", "~ /^#/ {print $0;next} {print $0 | \\\"LC_ALL=C sort -k1,1 -k2,2n\\\"}' | gzip", "shows %s' % (ref, genomicPos, refSeq)) sys.exit() return(None) else: return(refSeq) def SpotCheckProperReference(mafFile, Options,", "sys.exit() else: pass return (options, parser) def fn_timer(function): ''' Use this as a", "Malformed MAF entry. %s\"%('\\t'.join(line))) print('') with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') # print(line)", "vcf formatted information if mutType == '': mutType = '.' if variantType ==", "ref_reads = '.' total_reads = alt_reads else: alt_reads = '.' total_reads = ref_reads", "is None or options.refGenome is None: print(\"ERROR: Please include arguments for maf file,", "file, output directory, and reference genome (single fasta file).\") sys.exit() else: pass return", "type of variant to continue processing. 
linetowrite = None if variantType==\"SNP\": linetowrite =", "altAllele = tAllele1 refAllele = tAllele2 else: altAllele = tAllele2 refAllele = tAllele1", "return result return function_timer def UpdateProgressGetN(fileName): if fileName[len(fileName)-1]==\"z\": cmd = \"gzip -cd %s", "rsid, refAllele, altAllele, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut) def processDEL(line, chrom,", "time import numpy as np def OptionParsing(): usage = 'usage: %prog -i <*.maf>", "(germline unlikely since it is called w.r.t normal?) vaf = reportedVAF # Sets", "error if the provided reference matches :param refGenome: Reference Fasta file :param genomicPos:", "in [alt_reads, ialt_reads] if read != \"NA\"][0] total_reads = str(int(ref_reads) + int(alt_reads)) vaf", "% (ref, genomicPos, refSeq)) sys.exit() return(None) else: return(refSeq) def SpotCheckProperReference(mafFile, Options, fileLength): '''", "**kwargs) t1 = time.time() print (\"INFO: Total time running %s: %s minutes\" %", "want to get run time information about. :param function: Function of interest. :return:", "print(\"ERROR: Please include arguments for maf file, output directory, and reference genome (single", "+ line[ 14] + \";MAF_Variant_Type=\" + variantType + \";MAF_Variant_Classification=\" + mutType + \";DCC_Project_Code=\"", "!= tAllele1 and ref != tAllele2) or (strand != '+' and strand !=", "INFO: %s\" % ('=' * int(20 * j), 100 * j, DisplayText)) sys.stdout.flush()", "Chromosome Start_position'): count+=1 i += 1 else: i += 1 linetoWrite = CreateVCFLine(line,", "and iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA': vaf=i_t_vaf GT=\"./.\" ref_reads = '.'", "a MAF to a vcf4.2 file using python >=3.6. 
Created by <NAME> 8", "+ int(alt_reads)) vaf = str(int(alt_reads)/float(total_reads)) elif i_t_vaf!=\"\" and i_t_vaf!=\"NA\" and ref_reads == 'NA'", "# This may seem duplicitious, but I explicityly want to know as much", "vcfPos, rsid, vcfRef, vcfAlt, QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut) def processINS(line,", "for maf file, output directory, and reference genome (single fasta file).\") sys.exit() else:", "be proper reference genome') print('ERROR: Improper reference. Found %s at %s. Reference genome", "SNP) in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=DCC_Project_Code,Number=1,Type=String,Description=\\\"DCC Project Code in original MAF file.\\\">\\n\") ioObject.write(\"##FORMAT=<ID=GT,Number=1,Type=String,Description=\\\"Genotype\\\">\\n\")", "True: toContinue = True else: sys.exit(\"What the fuck\") i+=1 print('') return(toContinue) def processSNP(line,", "shell=True, stdout=subprocess.PIPE).stdout else: cmd = \"wc -l %s\" % (fileName) pipe = subprocess.Popen(cmd,", "'.' # Determine type of variant to continue processing. linetowrite = None if", "int(alt_reads)) vaf = repr(round(int(alt_reads) / float(total_reads), 4)) if vaf != '1.' and strand==\"+\"", "to fetched sequence. :return: Fetched reference sequence. ''' proc = subprocess.Popen(['samtools','faidx',refGenome, genomicPos], stdout=subprocess.PIPE)", "bar where appropriate. 
:param i: Current Step :param n: Total number of steps.", "+ line[0] + \";MAF_ref_context=\" + line[15].upper() + \";MAF_Genome_Change=\" + line[14] + \";MAF_Variant_Type=\" +", ":param Options: Parser Options :param fileLength: Length of the file being read :return:", "= [read for read in [alt_reads, ialt_reads] if read != \"NA\"][0] total_reads =", "line = line.rstrip('\\n').split('\\t') genomicPos = line[1] + \":\" + line[2] + \"-\" +", "None ''' sys.stdout.write('\\r') j = (i + 1) / n sys.stdout.write(\"[%-20s] %d%%\\t INFO:", "chrom, pos, rsid, mutType, variantType, strand, errorFile, Options): ref = line[7] tAllele1 =", "base and the variant pos vcfAlt=refSeq[0] vcfPos=refAnchorPos # Get read information iref_reads =", "as a wrapper at the top of any function you want to get", "''' Randomly samples the file to ensure proper reference file is used. Random", "\").split(\" \")[0])) def UpdateProgress(i, n, DisplayText): ''' Prints a progress bar where appropriate.", "alt_reads = [read for read in [alt_reads, ialt_reads] if read != \"NA\"][0] total_reads", "i += 1 else: i += 1 linetoWrite = CreateVCFLine(line, errorFile, Options) if", "variantType==\"ONP\": with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') else: # This may seem duplicitious,", "== False: # checkIt = len([k for k in a if k==i]) #", "print(\"ERROR: No header found in maf file.\") elif line.startswith('Hugo_Symbol Chromosome Start_position') == True:", "ref_reads = line[39] alt_reads = line[38] reportedVAF = line[28] i_t_vaf = line[43] #", "DEL %s\"%('\\t'.join(line))) sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Create INFO field", "id column\") # Strand Information strand = line[4] # Variant Classification/Type (Type is", "filter out as possible... if Options.verbose: print(\"WARNING: Malformed MAF entry. %s\"%('\\t'.join(line))) print('') with", "ref='', check=True): ''' Obtain reference sequence and perform check if needed. 
:param check:", "inFile: i = 0 for line in inFile: if i == 1: toPullIDs", "here\") elif i != 0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False: print(\"\") print(\"ERROR:", "to be converted.\") parser.add_option('-o', '--output_dir', dest=\"outDir\", default=None, help=\"Output directory for .vcf file\") parser.add_option('-r',", "alt_reads='.' total_reads='.' vaf='.' else: sys.exit(\"ERROR: Problem processing DEL %s\"%('\\t'.join(line))) sampleField = ':'.join([GT, ','.join([ref_reads,", "# sys.exit(\"ERROR: Malformed MAF entry.\") return(linetowrite) def CreateHeader(ioObject, Options, tumorID, normalID): now =", "'': mutType = '.' if variantType == '': variantType = '.' # Determine", "Allele QUAL = line[42] if QUAL == 'None' or QUAL == 'NA' or", "GT=\"0/1\" sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Last check for interesting", "\";MAF_Genome_Change=\" + line[ 14] + \";MAF_Variant_Type=\" + variantType + \";MAF_Variant_Classification=\" + mutType +", "== \"NA\") and ref_reads == 'NA' and iref_reads == 'NA' and alt_reads ==", "errerOut.write('\\t'.join(line)+'\\n') if Options.verbose: print(\"WARNING: %s\" % '\\t'.join(line)) return(None) # Simple SNV cases else:", "'.' 
elif rsid.startswith(\"rs\") == False: if Options.verbose: print(\"ERROR: %s\"%(line)) sys.exit(\"ERROR: Problem in id", "+ \"-\" + line[3], check=False) if refSeq[1:] != altAllele: print(\"ERROR: Deletion alternative allele", "= Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf') vcfFileSorted = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.head.maf','.sorted.vcf.gz') os.system(\"cat %s | awk", "tumorID, normalID) for line in inFile: UpdateProgress(i, n, \"Processing Maf File\") if line.startswith('Hugo_Symbol", "1 preceding base for the DEL refAnchorPos = str(int(pos)-1) # Fetch the base", "MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Classification,Number=1,Type=String,Description=\\\"Variant Classification (if SNP) in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=DCC_Project_Code,Number=1,Type=String,Description=\\\"DCC Project Code", "!= 0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False: print(\"\") print(\"ERROR: No header found", "ref_reads == 'NA' and iref_reads == 'NA' and alt_reads == 'NA' and ialt_reads", "= '.' elif ( i_t_vaf == \"\" or i_t_vaf == \"NA\") and ref_reads", "'./.' ref_reads = '.' alt_reads = '.' total_reads = '.' 
vaf = '.'", "','.join([ref_reads, alt_reads]), total_reads, vaf]) # Create INFO field INFO = \"MAF_Hugo_Symbol=\" + line[0]", "(fileName) pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout else: cmd = \"wc -l %s\" %", "or options.outDir is None or options.refGenome is None: print(\"ERROR: Please include arguments for", "Options, tumorID, normalID): now = datetime.datetime.now() ioObject.write(\"##fileformat=VCFv4.2\\n\") ioObject.write(\"##fileDate=%s\\n\"%(now.date())) ioObject.write(\"##source=maf2vcf.py\\n\") ioObject.write(\"##reference=%s\\n\"%(Options.refGenome)) ioObject.write(\"##sampleColumns=Normal.Tumor\\n\") ioObject.write(\"##INFO=<ID=MAF_Hugo_Symbol,Number=1,Type=String,Description=\\\"HUGO Symbol", "QUAL = '.' if ref == tAllele1: altAllele = tAllele1 refAllele = tAllele2", "= '.' total_reads = alt_reads else: alt_reads = '.' total_reads = ref_reads sampleField", "\"GT:AD:DP:VF\", normalGenotype, sampleField] return (lineOut) def CreateVCFLine(line, errorFile, Options): line = line.rstrip('\\n').split('\\t') #", "Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf'), 'w') as outVCF: errorFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/')) - 1].replace('.maf', '.ignoredSNVs.maf') with", "%s. Reference genome shows %s' % (ref, genomicPos, refSeq)) sys.exit() return(None) else: return(refSeq)", "line[7] tAllele1 = line[8] # Normal Allele tAllele2 = line[9] # Alt Allele", "= '.' alt_reads = '.' total_reads = '.' elif (i_t_vaf==\"\" or i_t_vaf==\"NA\") and", "proper vcf formatted information if mutType == '': mutType = '.' 
if variantType", "Options): ref = line[7] tAllele1 = line[8] # Normal Allele Typically tAllele2 =", "order listed\\\">\\n\") ioObject.write(\"##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\\\"Total read depth across this site\\\">\\n\") ioObject.write(\"##FORMAT=<ID=VF,Number=1,Type=Float,Description=\\\"Variant Allele Frequency.\\\">\\n\") ioObject.write(\"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\t%s\\t%s\\n\"%(normalID,tumorID)) @fn_timer", "[read for read in [alt_reads, ialt_reads] if read != \"NA\"][0] total_reads = str(int(ref_reads)", "errorOut.write(header) CreateHeader(outVCF, Options, tumorID, normalID) for line in inFile: UpdateProgress(i, n, \"Processing Maf", "with open(errorFile, 'w') as errorOut: errorOut.write(header) CreateHeader(outVCF, Options, tumorID, normalID) for line in", "file to be converted.\") parser.add_option('-o', '--output_dir', dest=\"outDir\", default=None, help=\"Output directory for .vcf file\")", "| awk '$1 ~ /^#/ {print $0;next} {print $0 | \\\"LC_ALL=C sort -k1,1", "read information iref_reads = line[37] ialt_reads = line[36] ref_reads = line[39] alt_reads =", "unlikely since it is called w.r.t normal?) vaf = reportedVAF # Sets VAF", "Options.outDir + Options.maf.split('/')[len(Options.maf.split('/')) - 1].replace('.maf', '.ignoredSNVs.maf') with open(errorFile, 'w') as errorOut: errorOut.write(header) CreateHeader(outVCF,", "to be homozygous for alternative allele (germline unlikely since it is called w.r.t", "homozygous for alternative allele (germline unlikely since it is called w.r.t normal?) vaf", "strand==\"-\": GT=\"0|1\" else: GT=\"0/1\" sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Last", "stdout=subprocess.PIPE).stdout else: cmd = \"wc -l %s\" % (fileName) pipe = subprocess.Popen(cmd, shell=True,", "refSeq[1:] != altAllele: print(\"ERROR: Deletion alternative allele does not match reference sequence. 
%s\"", "(opened) :param Options: Parser Options :param fileLength: Length of the file being read", "shell=True, stdout=subprocess.PIPE).stdout return(int(pipe.read().decode(\"utf-8\").lstrip(\" \").split(\" \")[0])) def UpdateProgress(i, n, DisplayText): ''' Prints a progress", "variantType == '': variantType = '.' # Determine type of variant to continue", "as errorOut: errorOut.write(header) CreateHeader(outVCF, Options, tumorID, normalID) for line in inFile: UpdateProgress(i, n,", "+ \";MAF_Genome_Change=\" + line[14] + \";MAF_Variant_Type=\" + variantType + \";MAF_Variant_Classification=\" + mutType +\";DCC_Project_Code=\"", "allele does not match reference sequence. %s\" % ('\\t'.join(line))) sys.exit() # VCF reference", "help=\"Use this flag to turn on verbose mode. Default=False\") (options, args) = parser.parse_args()", "= line[6] # Create proper vcf formatted information if mutType == '': mutType", "line[10] if rsid == '': rsid = '.' elif rsid.startswith(\"rs\") == False: if", "\"\" and i_t_vaf != \"NA\" and ref_reads == 'NA' and iref_reads == 'NA'", "file).\") sys.exit() else: pass return (options, parser) def fn_timer(function): ''' Use this as", "Step :param n: Total number of steps. :param DisplayText: A string that you", "know as much of what I'm choosing to filter out as possible... if", "information about. :param function: Function of interest. :return: A function to wrap around", "want to know as much of what I'm choosing to filter out as", "<*.maf> -o <directory> -r <ref.fa>' parser = OptionParser(usage) parser.add_option('-i', '--input_maf', dest=\"maf\", default=None, help=\".maf", "Project Code in original MAF file.\\\">\\n\") ioObject.write(\"##FORMAT=<ID=GT,Number=1,Type=String,Description=\\\"Genotype\\\">\\n\") ioObject.write(\"##FORMAT=<ID=AD,Number=2,Type=Integer,Description=\\\"Allelic depths of REF and ALT(s)", "<NAME> 8 March 2018 ''' import os import sys from optparse import OptionParser", "mutType = '.' 
if variantType == '': variantType = '.' # Determine type", "refAllele = tAllele2 else: altAllele = tAllele2 refAllele = tAllele1 # Obtain the", "+ \"-\" + line[3] ref = line[7] mutType = line[5] variantClass = line[6]", "def fn_timer(function): ''' Use this as a wrapper at the top of any", "elif ref_reads == 'NA' or alt_reads == 'NA' and reportedVAF == 'NA': with", "out as possible... if Options.verbose: print(\"WARNING: Malformed MAF entry. %s\"%('\\t'.join(line))) print('') with open(errorFile,", "'NA' and ialt_reads == 'NA': vaf = i_t_vaf GT = \"./.\" ref_reads =", "+ line[15].upper() + \";MAF_Genome_Change=\" + line[14] + \";MAF_Variant_Type=\" + variantType + \";MAF_Variant_Classification=\" +", "line[1], line[2], line[10] # Get rs ID rsid = line[10] if rsid ==", "rsid, mutType, variantType, strand, errorFile, Options) elif variantType==\"INS\": linetowrite = processINS(line, chrom, pos,", "ioObject.write(\"##INFO=<ID=MAF_Variant_Type,Number=1,Type=String,Description=\\\"Variant type (SNP,INS,DEL) in original MAF file.\\\">\\n\") ioObject.write(\"##INFO=<ID=MAF_Variant_Classification,Number=1,Type=String,Description=\\\"Variant Classification (if SNP) in original", "'NA' and reportedVAF == '1': GT = \"1/1\" # Appears to be homozygous", "for vaf==1 if (ref_reads != 'NA' or iref_reads!='NA') and (alt_reads != 'NA' or", "== 'NA' or alt_reads == 'NA' and reportedVAF == '1': GT = \"1/1\"", "!= 0 and line.startswith('Hugo_Symbol Chromosome Start_position') == False: # checkIt = len([k for", "line[3] ref = line[7] mutType = line[5] variantClass = line[6] if variantClass !=", "if anything if line[41] == \"NA\": normalGenotype = \".:.,.:.:.\" else: normalGenotype = \".:.,.:.:%s\"", "QUAL, '.', INFO, \"GT:AD:DP:VF\", normalGenotype, sampleField] return(lineOut) def processINS(line, chrom, pos, rsid, mutType,", "with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') else: # This may seem duplicitious, but", "else: header = line i+=1 tumorID = 
toPullIDs[12] normalID = toPullIDs[13] count =", "+ refAnchorPos + \"-\" + line[3], check=False) if refSeq[1:] != altAllele: print(\"ERROR: Deletion", "variant field if anything if line[41] == \"NA\": normalGenotype = \".:.,.:.:.\" else: normalGenotype", "reference genome (single fasta file).\") sys.exit() else: pass return (options, parser) def fn_timer(function):", "total_reads = ref_reads sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Tossing these", "= str(int(alt_reads)/float(total_reads)) elif i_t_vaf!=\"\" and i_t_vaf!=\"NA\" and ref_reads == 'NA' and iref_reads=='NA' and", "''' sys.stdout.write('\\r') j = (i + 1) / n sys.stdout.write(\"[%-20s] %d%%\\t INFO: %s\"", "return(None) else: return(refSeq) def SpotCheckProperReference(mafFile, Options, fileLength): ''' Randomly samples the file to", "if checkIt==1: UpdateProgress(count, len(a), \"INFO: Verifying maf file\") count+=1 line = line.rstrip('\\n').split('\\t') genomicPos", "line[39] alt_reads = line[38] reportedVAF = line[28] # Get phasing information and determine", "tAllele1 # Obtain the reference sequence + 1 preceding base for the DEL", "Start_position') == False: print(\"\") print(\"ERROR: No header found in maf file.\") elif line.startswith('Hugo_Symbol", "line in inFile: if i == 1: toPullIDs = line.rstrip('\\n').split('\\t') break else: header", "reference is the preceding base plus the reported deletion in the MAF file.", "errerOut.write('\\t'.join(line)+'\\n') if Options.verbose: print(\"WARNING: %s\" % '\\t'.join(line)) return(None) # Create INFO field INFO", "does not match reference sequence. 
%s\" % ('\\t'.join(line))) sys.exit() # VCF reference is", "file.\\\">\\n\") ioObject.write(\"##FORMAT=<ID=GT,Number=1,Type=String,Description=\\\"Genotype\\\">\\n\") ioObject.write(\"##FORMAT=<ID=AD,Number=2,Type=Integer,Description=\\\"Allelic depths of REF and ALT(s) in the order listed\\\">\\n\") ioObject.write(\"##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\\\"Total", "and reportedVAF == '1': GT = \"1/1\" # Appears to be homozygous for", "altAllele: print(\"ERROR: Deletion alternative allele does not match reference sequence. %s\" % ('\\t'.join(line)))", "%s\"%('\\t'.join(line))) print('') with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') # print(line) # sys.exit(\"ERROR: Malformed", "> %s\"%(vcfFile, vcfFileSorted)) os.system(\"rm %s\"%(vcfFile)) os.system(\"gzip %s\"%(errorFile)) def main(): print(\"INFO: Processing MAF file.\")", "def SpotCheckProperReference(mafFile, Options, fileLength): ''' Randomly samples the file to ensure proper reference", "= \".:.,.:.:%s\" % (line[41]) lineOut = [chrom, vcfPos, rsid, vcfRef, vcfAlt, QUAL, '.',", "\"1/1\" # Appears to be homozygous for alternative allele (germline unlikely since it", "line[ 14] + \";MAF_Variant_Type=\" + variantType + \";MAF_Variant_Classification=\" + mutType + \";DCC_Project_Code=\" +", "= list(a[:int(fileLength*n)]) i = 0 count = 0 for line in mafFile: if", "ref_reads sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads, vaf]) # Tossing these very strange", "Determine type of variant to continue processing. 
linetowrite = None if variantType==\"SNP\": linetowrite", "else: sys.exit(\"ERROR: Problem processing INS %s\" % ('\\t'.join(line))) sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]),", "+ \":\" + line[2] + \"-\" + line[3] ref = line[7] mutType =", "help=\".maf file to be converted.\") parser.add_option('-o', '--output_dir', dest=\"outDir\", default=None, help=\"Output directory for .vcf", "convert a MAF to a vcf4.2 file using python >=3.6. Created by <NAME>", "outInfo[1:]]) if check: if refSeq == ref: return(True) else: print('ERROR: May not be", "mutType = line[5] variantClass = line[6] if variantClass != \"INS\" and variantClass !=", "def CreateVCFLine(line, errorFile, Options): line = line.rstrip('\\n').split('\\t') # Genomic Position chrom, pos, id", "dest='verbose', default=False, action='store_true', help=\"Use this flag to turn on verbose mode. Default=False\") (options,", "and strand==\"+\" or strand==\"-\": GT=\"0|1\" else: GT=\"0/1\" sampleField = ':'.join([GT, ','.join([ref_reads, alt_reads]), total_reads,", "result return function_timer def UpdateProgressGetN(fileName): if fileName[len(fileName)-1]==\"z\": cmd = \"gzip -cd %s |", "as the alternative base and the variant pos vcfAlt=refSeq[0] vcfPos=refAnchorPos # Get read", "dest=\"maf\", default=None, help=\".maf file to be converted.\") parser.add_option('-o', '--output_dir', dest=\"outDir\", default=None, help=\"Output directory", "**kwargs): t0 = time.time() result = function(*args, **kwargs) t1 = time.time() print (\"INFO:", "reference file is used. Random sampling is employed to ensure proper reference is", "100 * j, DisplayText)) sys.stdout.flush() def SamtoolsFaidx(refGenome, genomicPos, ref='', check=True): ''' Obtain reference", "(Type is SNP, INS, DEL, etc.) 
mutType = line[5] variantType = line[6] #", "ref_reads = [read for read in [ref_reads, iref_reads] if read != \"NA\"][0] alt_reads", "may seem duplicitious, but I explicityly want to know as much of what", "+ int(alt_reads)) vaf = str(int(alt_reads) / float(total_reads)) elif i_t_vaf != \"\" and i_t_vaf", "else: altAllele = tAllele1 # Obtain the reference sequence + 1 preceding base", "as much of what I'm choosing to filter out as possible... if Options.verbose:", "phasing information and determine reads for vaf==1 if (ref_reads != 'NA' or iref_reads!='NA')", "print(\"WARNING: Malformed MAF entry. %s\"%('\\t'.join(line))) print('') with open(errorFile, 'a') as errerOut: errerOut.write('\\t'.join(line)+'\\n') #", "import sys from optparse import OptionParser import subprocess from functools import wraps import", "= line[7] tAllele1 = line[8] # Normal Allele Typically tAllele2 = line[9] #", "= len([k for k in a if k==i]) # if checkIt==1: UpdateProgress(count, len(a),", "%s\" % (fileName) pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout return(int(pipe.read().decode(\"utf-8\").lstrip(\" \").split(\" \")[0])) def UpdateProgress(i,", "'\\t'.join(line)) return(None) # Create INFO field INFO = \"MAF_Hugo_Symbol=\" + line[0] + \";MAF_ref_context=\"", "and ref_reads == 'NA' and iref_reads=='NA' and alt_reads == 'NA' and ialt_reads=='NA': GT='./.'", "elif i_t_vaf != \"\" and i_t_vaf != \"NA\" and ref_reads == 'NA' and", "= line[7] tAllele1 = line[8] # Normal Allele tAllele2 = line[9] # Alt", "14] + \";MAF_Variant_Type=\" + variantType + \";MAF_Variant_Classification=\" + mutType + \";DCC_Project_Code=\" + \\", "None: outVCF.write('\\t'.join(linetoWrite)+'\\n') print('') print(\"INFO: Sorting vcf file.\") vcfFile = Options.outDir + Options.maf.split('/')[len(Options.maf.split('/'))-1].replace('.maf','.vcf') vcfFileSorted", "read != \"NA\"][0] total_reads = str(int(ref_reads) + int(alt_reads)) vaf = str(int(alt_reads) / float(total_reads))", "for vaf==1 
if ref_reads == 'NA' or alt_reads == 'NA' and reportedVAF ==", "= line i+=1 tumorID = toPullIDs[12] normalID = toPullIDs[13] count = 0 i", "options.maf is None or options.outDir is None or options.refGenome is None: print(\"ERROR: Please", "> 200: n=0.02 else: n=1. a = np.arange(fileLength) np.random.shuffle(a) a = list(a[:int(fileLength*n)]) i", "'-': altAllele = tAllele2 else: altAllele = tAllele1 # Obtain the reference sequence", "get run time information about. :param function: Function of interest. :return: A function", "\":\" + refAnchorPos + \"-\" + line[3], check=False) # VCF reference is the", "and alt_reads == 'NA' and ialt_reads == 'NA': GT = './.' ref_reads =", "for line in inFile: UpdateProgress(i, n, \"Processing Maf File\") if line.startswith('Hugo_Symbol Chromosome Start_position'):", "* j, DisplayText)) sys.stdout.flush() def SamtoolsFaidx(refGenome, genomicPos, ref='', check=True): ''' Obtain reference sequence", "Obtain reference sequence and perform check if needed. :param check: Whether or not" ]
[ "list = 18pfd_wk{i:02d}.txt folder = repository\\wk{i:02d} count_commits = True run_all = True pound_count", "folder = repository\\wk{i:02d} count_commits = True run_all = True pound_count = True after", "after = 2018-08-27 12:52 \"\"\" for i in range(5, 10+1): if 8 !=", "True run_all = True pound_count = True after = 2018-08-27 12:52 \"\"\" for", "pound_count = True after = 2018-08-27 12:52 \"\"\" for i in range(5, 10+1):", "= 2018-08-27 12:52 \"\"\" for i in range(5, 10+1): if 8 != i:", "<gh_stars>0 cfg_template = r\"\"\"[18pfd_{i:02d}] list = 18pfd_wk{i:02d}.txt folder = repository\\wk{i:02d} count_commits = True", "True after = 2018-08-27 12:52 \"\"\" for i in range(5, 10+1): if 8", "True pound_count = True after = 2018-08-27 12:52 \"\"\" for i in range(5,", "r\"\"\"[18pfd_{i:02d}] list = 18pfd_wk{i:02d}.txt folder = repository\\wk{i:02d} count_commits = True run_all = True", "= r\"\"\"[18pfd_{i:02d}] list = 18pfd_wk{i:02d}.txt folder = repository\\wk{i:02d} count_commits = True run_all =", "= True after = 2018-08-27 12:52 \"\"\" for i in range(5, 10+1): if", "= 18pfd_wk{i:02d}.txt folder = repository\\wk{i:02d} count_commits = True run_all = True pound_count =", "18pfd_wk{i:02d}.txt folder = repository\\wk{i:02d} count_commits = True run_all = True pound_count = True", "2018-08-27 12:52 \"\"\" for i in range(5, 10+1): if 8 != i: print(cfg_template.format(**{'i':", "= repository\\wk{i:02d} count_commits = True run_all = True pound_count = True after =", "12:52 \"\"\" for i in range(5, 10+1): if 8 != i: print(cfg_template.format(**{'i': i}))", "repository\\wk{i:02d} count_commits = True run_all = True pound_count = True after = 2018-08-27", "count_commits = True run_all = True pound_count = True after = 2018-08-27 12:52", "= True run_all = True pound_count = True after = 2018-08-27 12:52 \"\"\"", "run_all = True pound_count = True after = 2018-08-27 12:52 \"\"\" for i", "cfg_template = r\"\"\"[18pfd_{i:02d}] list = 18pfd_wk{i:02d}.txt folder = 
repository\\wk{i:02d} count_commits = True run_all", "= True pound_count = True after = 2018-08-27 12:52 \"\"\" for i in" ]
[ "def test_finalize(): with raises(TypeError): finalize(1) a = variable(range(10), default=2) assert finalize(a)[finalize(a).value] == finalize(a).value", "import Tunable from tuneit.finalize import finalize from pytest import raises def test_finalize(): with", "finalize(a).value.fixed d = b.copy() assert d.compute() == 4 assert b.tunable_variables == b.variables assert", "from pytest import raises def test_finalize(): with raises(TypeError): finalize(1) a = variable(range(10), default=2)", "b.copy() assert d.compute() == 4 assert b.tunable_variables == b.variables assert d.fixed_variables == b.variables", "import raises def test_finalize(): with raises(TypeError): finalize(1) a = variable(range(10), default=2) assert finalize(a)[finalize(a).value]", "4 assert b.fixed_variables == b.variables assert not b.tunable_variables assert len(b.functions) == 2 assert", "set([finalize(a).key, finalize(c).key]) assert b.tunable_variables == b.variables assert b.compute() == 4 assert b.fixed_variables ==", "b.fixed_variables == b.variables with raises(KeyError): b.fix(\"foo\") a = variable(range(10), uid=True) with raises(KeyError): finalize(a", "c) assert set(b.variables) == set([finalize(a).key, finalize(c).key]) assert b.tunable_variables == b.variables assert b.compute() ==", "tuneit.variable import * from tuneit.tunable import Tunable from tuneit.finalize import finalize from pytest", "== 4 assert b.tunable_variables == b.variables assert d.fixed_variables == b.variables b.fix(\"a\") b.fix(finalize(c).value, 1)", "tuneit.tunable import * from tuneit.variable import * from tuneit.tunable import Tunable from tuneit.finalize", "assert b.depends_on(finalize(a).value) b = b.copy(reset=True) assert b.tunable_variables == b.variables assert finalize(a).value.fixed d =", "b.tunable_variables == b.variables assert finalize(a).value.fixed d = b.copy() assert d.compute() == 4 assert", "raises(TypeError): finalize(1) a = variable(range(10), default=2) assert 
finalize(a)[finalize(a).value] == finalize(a).value c = variable(range(10))", "assert b.fixed_variables == b.variables with raises(KeyError): b.fix(\"foo\") a = variable(range(10), uid=True) with raises(KeyError):", "assert d.fixed_variables == b.variables b.fix(\"a\") b.fix(finalize(c).value, 1) assert b.compute() == 5 assert b.fixed_variables", "not b.depends_on(1) assert b.depends_on(a) assert b.depends_on(finalize(a).value) b = b.copy(reset=True) assert b.tunable_variables == b.variables", "assert b.depends_on(a) assert b.depends_on(finalize(a).value) b = b.copy(reset=True) assert b.tunable_variables == b.variables assert finalize(a).value.fixed", "finalize(a * a + c) assert set(b.variables) == set([finalize(a).key, finalize(c).key]) assert b.tunable_variables ==", "from tuneit.tunable import Tunable from tuneit.finalize import finalize from pytest import raises def", "Tunable from tuneit.finalize import finalize from pytest import raises def test_finalize(): with raises(TypeError):", "b.compute() == 5 assert b.fixed_variables == b.variables with raises(KeyError): b.fix(\"foo\") a = variable(range(10),", "b.depends_on(finalize(a).value) b = b.copy(reset=True) assert b.tunable_variables == b.variables assert finalize(a).value.fixed d = b.copy()", "pytest import raises def test_finalize(): with raises(TypeError): finalize(1) a = variable(range(10), default=2) assert", "assert b.tunable_variables == b.variables assert finalize(a).value.fixed d = b.copy() assert d.compute() == 4", "= finalize(a * a + c) assert set(b.variables) == set([finalize(a).key, finalize(c).key]) assert b.tunable_variables", "a + c) assert set(b.variables) == set([finalize(a).key, finalize(c).key]) assert b.tunable_variables == b.variables assert", "b.fixed_variables == b.variables assert not b.tunable_variables assert len(b.functions) == 2 assert not b.depends_on(1)", "== finalize(a).value c = variable(range(10)) b = finalize(a * a + c) assert", "tuneit.tunable import Tunable from 
tuneit.finalize import finalize from pytest import raises def test_finalize():", "assert d.compute() == 4 assert b.tunable_variables == b.variables assert d.fixed_variables == b.variables b.fix(\"a\")", "= variable(range(10)) b = finalize(a * a + c) assert set(b.variables) == set([finalize(a).key,", "assert b.compute() == 5 assert b.fixed_variables == b.variables with raises(KeyError): b.fix(\"foo\") a =", "assert b.tunable_variables == b.variables assert b.compute() == 4 assert b.fixed_variables == b.variables assert", "b.variables b.fix(\"a\") b.fix(finalize(c).value, 1) assert b.compute() == 5 assert b.fixed_variables == b.variables with", "assert finalize(a).value.fixed d = b.copy() assert d.compute() == 4 assert b.tunable_variables == b.variables", "== 4 assert b.fixed_variables == b.variables assert not b.tunable_variables assert len(b.functions) == 2", "finalize from pytest import raises def test_finalize(): with raises(TypeError): finalize(1) a = variable(range(10),", "b.copy(reset=True) assert b.tunable_variables == b.variables assert finalize(a).value.fixed d = b.copy() assert d.compute() ==", "b.depends_on(1) assert b.depends_on(a) assert b.depends_on(finalize(a).value) b = b.copy(reset=True) assert b.tunable_variables == b.variables assert", "test_finalize(): with raises(TypeError): finalize(1) a = variable(range(10), default=2) assert finalize(a)[finalize(a).value] == finalize(a).value c", "tuneit.finalize import finalize from pytest import raises def test_finalize(): with raises(TypeError): finalize(1) a", "c = variable(range(10)) b = finalize(a * a + c) assert set(b.variables) ==", "2 assert not b.depends_on(1) assert b.depends_on(a) assert b.depends_on(finalize(a).value) b = b.copy(reset=True) assert b.tunable_variables", "* from tuneit.variable import * from tuneit.tunable import Tunable from tuneit.finalize import finalize", "== b.variables assert finalize(a).value.fixed d = b.copy() assert d.compute() == 4 assert b.tunable_variables", "assert 
finalize(a)[finalize(a).value] == finalize(a).value c = variable(range(10)) b = finalize(a * a +", "default=2) assert finalize(a)[finalize(a).value] == finalize(a).value c = variable(range(10)) b = finalize(a * a", "b.tunable_variables == b.variables assert b.compute() == 4 assert b.fixed_variables == b.variables assert not", "* a + c) assert set(b.variables) == set([finalize(a).key, finalize(c).key]) assert b.tunable_variables == b.variables", "a = variable(range(10), default=2) assert finalize(a)[finalize(a).value] == finalize(a).value c = variable(range(10)) b =", "finalize(a).value c = variable(range(10)) b = finalize(a * a + c) assert set(b.variables)", "== b.variables with raises(KeyError): b.fix(\"foo\") a = variable(range(10), uid=True) with raises(KeyError): finalize(a *", "== b.variables assert not b.tunable_variables assert len(b.functions) == 2 assert not b.depends_on(1) assert", "from tuneit.variable import * from tuneit.tunable import Tunable from tuneit.finalize import finalize from", "with raises(TypeError): finalize(1) a = variable(range(10), default=2) assert finalize(a)[finalize(a).value] == finalize(a).value c =", "b.tunable_variables assert len(b.functions) == 2 assert not b.depends_on(1) assert b.depends_on(a) assert b.depends_on(finalize(a).value) b", "b.fix(finalize(c).value, 1) assert b.compute() == 5 assert b.fixed_variables == b.variables with raises(KeyError): b.fix(\"foo\")", "not b.tunable_variables assert len(b.functions) == 2 assert not b.depends_on(1) assert b.depends_on(a) assert b.depends_on(finalize(a).value)", "5 assert b.fixed_variables == b.variables with raises(KeyError): b.fix(\"foo\") a = variable(range(10), uid=True) with", "finalize(c).key]) assert b.tunable_variables == b.variables assert b.compute() == 4 assert b.fixed_variables == b.variables", "raises def test_finalize(): with raises(TypeError): finalize(1) a = variable(range(10), default=2) assert finalize(a)[finalize(a).value] ==", "== b.variables assert 
d.fixed_variables == b.variables b.fix(\"a\") b.fix(finalize(c).value, 1) assert b.compute() == 5", "b.variables assert d.fixed_variables == b.variables b.fix(\"a\") b.fix(finalize(c).value, 1) assert b.compute() == 5 assert", "d.compute() == 4 assert b.tunable_variables == b.variables assert d.fixed_variables == b.variables b.fix(\"a\") b.fix(finalize(c).value,", "4 assert b.tunable_variables == b.variables assert d.fixed_variables == b.variables b.fix(\"a\") b.fix(finalize(c).value, 1) assert", "b = b.copy(reset=True) assert b.tunable_variables == b.variables assert finalize(a).value.fixed d = b.copy() assert", "= variable(range(10), default=2) assert finalize(a)[finalize(a).value] == finalize(a).value c = variable(range(10)) b = finalize(a", "+ c) assert set(b.variables) == set([finalize(a).key, finalize(c).key]) assert b.tunable_variables == b.variables assert b.compute()", "variable(range(10)) b = finalize(a * a + c) assert set(b.variables) == set([finalize(a).key, finalize(c).key])", "= b.copy() assert d.compute() == 4 assert b.tunable_variables == b.variables assert d.fixed_variables ==", "== set([finalize(a).key, finalize(c).key]) assert b.tunable_variables == b.variables assert b.compute() == 4 assert b.fixed_variables", "visualize from tuneit.tunable import * from tuneit.variable import * from tuneit.tunable import Tunable", "d.fixed_variables == b.variables b.fix(\"a\") b.fix(finalize(c).value, 1) assert b.compute() == 5 assert b.fixed_variables ==", "import * from tuneit.tunable import Tunable from tuneit.finalize import finalize from pytest import", "b.fix(\"a\") b.fix(finalize(c).value, 1) assert b.compute() == 5 assert b.fixed_variables == b.variables with raises(KeyError):", "== b.variables b.fix(\"a\") b.fix(finalize(c).value, 1) assert b.compute() == 5 assert b.fixed_variables == b.variables", "b.variables assert not b.tunable_variables assert len(b.functions) == 2 assert not b.depends_on(1) assert b.depends_on(a)", "b.compute() == 4 assert 
b.fixed_variables == b.variables assert not b.tunable_variables assert len(b.functions) ==", "from tuneit.graph import visualize from tuneit.tunable import * from tuneit.variable import * from", "assert not b.tunable_variables assert len(b.functions) == 2 assert not b.depends_on(1) assert b.depends_on(a) assert", "b = finalize(a * a + c) assert set(b.variables) == set([finalize(a).key, finalize(c).key]) assert", "assert b.tunable_variables == b.variables assert d.fixed_variables == b.variables b.fix(\"a\") b.fix(finalize(c).value, 1) assert b.compute()", "set(b.variables) == set([finalize(a).key, finalize(c).key]) assert b.tunable_variables == b.variables assert b.compute() == 4 assert", "len(b.functions) == 2 assert not b.depends_on(1) assert b.depends_on(a) assert b.depends_on(finalize(a).value) b = b.copy(reset=True)", "= b.copy(reset=True) assert b.tunable_variables == b.variables assert finalize(a).value.fixed d = b.copy() assert d.compute()", "b.tunable_variables == b.variables assert d.fixed_variables == b.variables b.fix(\"a\") b.fix(finalize(c).value, 1) assert b.compute() ==", "== 5 assert b.fixed_variables == b.variables with raises(KeyError): b.fix(\"foo\") a = variable(range(10), uid=True)", "finalize(a)[finalize(a).value] == finalize(a).value c = variable(range(10)) b = finalize(a * a + c)", "from tuneit.tunable import * from tuneit.variable import * from tuneit.tunable import Tunable from", "tuneit.graph import visualize from tuneit.tunable import * from tuneit.variable import * from tuneit.tunable", "variable(range(10), default=2) assert finalize(a)[finalize(a).value] == finalize(a).value c = variable(range(10)) b = finalize(a *", "assert b.compute() == 4 assert b.fixed_variables == b.variables assert not b.tunable_variables assert len(b.functions)", "import visualize from tuneit.tunable import * from tuneit.variable import * from tuneit.tunable import", "1) assert b.compute() == 5 assert b.fixed_variables == b.variables with raises(KeyError): 
b.fix(\"foo\") a", "import * from tuneit.variable import * from tuneit.tunable import Tunable from tuneit.finalize import", "assert len(b.functions) == 2 assert not b.depends_on(1) assert b.depends_on(a) assert b.depends_on(finalize(a).value) b =", "assert not b.depends_on(1) assert b.depends_on(a) assert b.depends_on(finalize(a).value) b = b.copy(reset=True) assert b.tunable_variables ==", "b.variables with raises(KeyError): b.fix(\"foo\") a = variable(range(10), uid=True) with raises(KeyError): finalize(a * b).fix(\"a\")", "== b.variables assert b.compute() == 4 assert b.fixed_variables == b.variables assert not b.tunable_variables", "b.variables assert finalize(a).value.fixed d = b.copy() assert d.compute() == 4 assert b.tunable_variables ==", "assert b.fixed_variables == b.variables assert not b.tunable_variables assert len(b.functions) == 2 assert not", "import finalize from pytest import raises def test_finalize(): with raises(TypeError): finalize(1) a =", "assert set(b.variables) == set([finalize(a).key, finalize(c).key]) assert b.tunable_variables == b.variables assert b.compute() == 4", "from tuneit.finalize import finalize from pytest import raises def test_finalize(): with raises(TypeError): finalize(1)", "b.variables assert b.compute() == 4 assert b.fixed_variables == b.variables assert not b.tunable_variables assert", "d = b.copy() assert d.compute() == 4 assert b.tunable_variables == b.variables assert d.fixed_variables", "== 2 assert not b.depends_on(1) assert b.depends_on(a) assert b.depends_on(finalize(a).value) b = b.copy(reset=True) assert", "* from tuneit.tunable import Tunable from tuneit.finalize import finalize from pytest import raises", "b.depends_on(a) assert b.depends_on(finalize(a).value) b = b.copy(reset=True) assert b.tunable_variables == b.variables assert finalize(a).value.fixed d", "finalize(1) a = variable(range(10), default=2) assert finalize(a)[finalize(a).value] == finalize(a).value c = variable(range(10)) b" ]
[ "s3_hello(person_name): print('Hello There Person:', person_name) def s3_push_delete_local(local_file, bucket, bucket_filepath): print('def s3_push_delete_local(local_file, bucket, bucket_filepath):')", "def s3_hello(person_name): print('Hello There Person:', person_name) def s3_push_delete_local(local_file, bucket, bucket_filepath): print('def s3_push_delete_local(local_file, bucket,", "<gh_stars>0 def s3_hello(person_name): print('Hello There Person:', person_name) def s3_push_delete_local(local_file, bucket, bucket_filepath): print('def s3_push_delete_local(local_file," ]
[ "elif func == 'sin': outputs['y'] = np.sin(inputs['x']) else: outputs['y'] = func(inputs['x']) class TestOptionsDictionaryFeature(unittest.TestCase):", "OptionsDictionary, ExplicitComponent import unittest from six import PY3, assertRegex import numpy as np", "3.] prob.run_model() assert_rel_error(self, prob['y'], [2., 4., 6.]) def test_with_default(self): from openmdao.api import Problem", "prob = Problem() prob.model = LinearCombinationComp(a=2.) prob.setup() prob['x'] = 3 prob.run_model() self.assertEqual(prob['y'], 7.)", "prob.setup() prob['x'] = 0. prob.run_model() self.assertEqual(prob['y'], 1.) def myfunc(x): return x ** 2", "= Problem() prob.model = LinearCombinationComp(a=2.) prob.setup() prob['x'] = 3 prob.run_model() self.assertEqual(prob['y'], 7.) def", "default=1., type_=(int, float)) def setup(self): self.add_input('x') self.add_output('y') self.declare_partials('y', 'x', val=self.metadata['a']) def compute(self, inputs,", "prob = Problem() prob.model = UnitaryFunctionComp(func=myfunc) prob.setup() prob['x'] = 2. 
prob.run_model() self.assertEqual(prob['y'], 6.)", "outputs): outputs['y'] = self.metadata['a'] * inputs['x'] + self.metadata['b'] class UnitaryFunctionComp(ExplicitComponent): def initialize(self): from", "from openmdao.api import Problem prob = Problem() prob.model = UnitaryFunctionComp(func='cos') prob.setup() prob['x'] =", "self.declare_partials('y', 'x', val=self.metadata['a']) def compute(self, inputs, outputs): outputs['y'] = self.metadata['a'] * inputs['x'] +", "Problem prob = Problem() prob.model = VectorDoublingComp(size=3) prob.setup() prob['x'] = [1., 2., 3.]", "outputs): func = self.metadata['func'] if func == 'exp': outputs['y'] = np.exp(inputs['x']) elif func", "func == 'cos': outputs['y'] = np.cos(inputs['x']) elif func == 'sin': outputs['y'] = np.sin(inputs['x'])", "= self.metadata['size'] self.add_input('x', shape=size) self.add_output('y', shape=size) self.declare_partials('y', 'x', val=2., rows=np.arange(size), cols=np.arange(size)) def compute(self,", "unittest from six import PY3, assertRegex import numpy as np from openmdao.devtools.testutil import", "initialize(self): self.metadata.declare('a', default=1., type_=(int, float)) self.metadata.declare('b', default=1., type_=(int, float)) def setup(self): self.add_input('x') self.add_output('y')", "func = self.metadata['func'] if func == 'exp': outputs['y'] = np.exp(inputs['x']) elif func ==", "np.cos(inputs['x']) elif func == 'sin': outputs['y'] = np.sin(inputs['x']) else: outputs['y'] = func(inputs['x']) class", "= func(inputs['x']) class TestOptionsDictionaryFeature(unittest.TestCase): def test_simple(self): from openmdao.api import Problem prob = Problem()", "= np.cos(inputs['x']) elif func == 'sin': outputs['y'] = np.sin(inputs['x']) else: outputs['y'] = func(inputs['x'])", "prob.setup() prob['x'] = [1., 2., 3.] 
prob.run_model() assert_rel_error(self, prob['y'], [2., 4., 6.]) def", "prob.run_model() assert_rel_error(self, prob['y'], [2., 4., 6.]) def test_with_default(self): from openmdao.api import Problem prob", "self.metadata.declare('b', default=1., type_=(int, float)) def setup(self): self.add_input('x') self.add_output('y') self.declare_partials('y', 'x', val=self.metadata['a']) def compute(self,", "openmdao.api import Problem prob = Problem() prob.model = LinearCombinationComp(a=2.) prob.setup() prob['x'] = 3", "3 prob.run_model() self.assertEqual(prob['y'], 7.) def test_values_and_types(self): from openmdao.api import Problem prob = Problem()", "openmdao.api import Problem prob = Problem() prob.model = UnitaryFunctionComp(func='cos') prob.setup() prob['x'] = 0.", "self.metadata['a'] * inputs['x'] + self.metadata['b'] class UnitaryFunctionComp(ExplicitComponent): def initialize(self): from types import FunctionType", "from openmdao.api import Problem prob = Problem() prob.model = VectorDoublingComp(size=3) prob.setup() prob['x'] =", "class LinearCombinationComp(ExplicitComponent): def initialize(self): self.metadata.declare('a', default=1., type_=(int, float)) self.metadata.declare('b', default=1., type_=(int, float)) def", "test_simple(self): from openmdao.api import Problem prob = Problem() prob.model = VectorDoublingComp(size=3) prob.setup() prob['x']", "openmdao.api import Problem prob = Problem() prob.model = VectorDoublingComp(size=3) prob.setup() prob['x'] = [1.,", "import assert_rel_error class VectorDoublingComp(ExplicitComponent): def initialize(self): self.metadata.declare('size', type_=int) def setup(self): size = self.metadata['size']", "1.) 
def myfunc(x): return x ** 2 + 2 prob = Problem() prob.model", "func == 'exp': outputs['y'] = np.exp(inputs['x']) elif func == 'cos': outputs['y'] = np.cos(inputs['x'])", "'x', method='fd') def compute(self, inputs, outputs): func = self.metadata['func'] if func == 'exp':", "= np.exp(inputs['x']) elif func == 'cos': outputs['y'] = np.cos(inputs['x']) elif func == 'sin':", "self.metadata.declare('func', values=('exp', 'cos', 'sin'), type_=FunctionType) def setup(self): self.add_input('x') self.add_output('y') self.declare_partials('y', 'x', method='fd') def", "compute(self, inputs, outputs): outputs['y'] = 2 * inputs['x'] class LinearCombinationComp(ExplicitComponent): def initialize(self): self.metadata.declare('a',", "outputs['y'] = np.exp(inputs['x']) elif func == 'cos': outputs['y'] = np.cos(inputs['x']) elif func ==", "Problem() prob.model = VectorDoublingComp(size=3) prob.setup() prob['x'] = [1., 2., 3.] prob.run_model() assert_rel_error(self, prob['y'],", "from openmdao.devtools.testutil import assert_rel_error class VectorDoublingComp(ExplicitComponent): def initialize(self): self.metadata.declare('size', type_=int) def setup(self): size", "else: outputs['y'] = func(inputs['x']) class TestOptionsDictionaryFeature(unittest.TestCase): def test_simple(self): from openmdao.api import Problem prob", "def initialize(self): self.metadata.declare('a', default=1., type_=(int, float)) self.metadata.declare('b', default=1., type_=(int, float)) def setup(self): self.add_input('x')", "float)) self.metadata.declare('b', default=1., type_=(int, float)) def setup(self): self.add_input('x') self.add_output('y') self.declare_partials('y', 'x', val=self.metadata['a']) def", "def myfunc(x): return x ** 2 + 2 prob = Problem() prob.model =", "self.metadata['size'] self.add_input('x', shape=size) self.add_output('y', shape=size) self.declare_partials('y', 'x', val=2., rows=np.arange(size), cols=np.arange(size)) def compute(self, inputs,", "self.metadata.declare('size', type_=int) 
def setup(self): size = self.metadata['size'] self.add_input('x', shape=size) self.add_output('y', shape=size) self.declare_partials('y', 'x',", "class TestOptionsDictionaryFeature(unittest.TestCase): def test_simple(self): from openmdao.api import Problem prob = Problem() prob.model =", "= Problem() prob.model = UnitaryFunctionComp(func='cos') prob.setup() prob['x'] = 0. prob.run_model() self.assertEqual(prob['y'], 1.) def", "prob.model = VectorDoublingComp(size=3) prob.setup() prob['x'] = [1., 2., 3.] prob.run_model() assert_rel_error(self, prob['y'], [2.,", "myfunc(x): return x ** 2 + 2 prob = Problem() prob.model = UnitaryFunctionComp(func=myfunc)", "* inputs['x'] class LinearCombinationComp(ExplicitComponent): def initialize(self): self.metadata.declare('a', default=1., type_=(int, float)) self.metadata.declare('b', default=1., type_=(int,", "+ self.metadata['b'] class UnitaryFunctionComp(ExplicitComponent): def initialize(self): from types import FunctionType self.metadata.declare('func', values=('exp', 'cos',", "= 2 * inputs['x'] class LinearCombinationComp(ExplicitComponent): def initialize(self): self.metadata.declare('a', default=1., type_=(int, float)) self.metadata.declare('b',", "== 'cos': outputs['y'] = np.cos(inputs['x']) elif func == 'sin': outputs['y'] = np.sin(inputs['x']) else:", "prob = Problem() prob.model = VectorDoublingComp(size=3) prob.setup() prob['x'] = [1., 2., 3.] prob.run_model()", "[1., 2., 3.] prob.run_model() assert_rel_error(self, prob['y'], [2., 4., 6.]) def test_with_default(self): from openmdao.api", "np.sin(inputs['x']) else: outputs['y'] = func(inputs['x']) class TestOptionsDictionaryFeature(unittest.TestCase): def test_simple(self): from openmdao.api import Problem", "prob['y'], [2., 4., 6.]) def test_with_default(self): from openmdao.api import Problem prob = Problem()", "import Problem prob = Problem() prob.model = LinearCombinationComp(a=2.) 
prob.setup() prob['x'] = 3 prob.run_model()", "inputs, outputs): outputs['y'] = 2 * inputs['x'] class LinearCombinationComp(ExplicitComponent): def initialize(self): self.metadata.declare('a', default=1.,", "Problem() prob.model = LinearCombinationComp(a=2.) prob.setup() prob['x'] = 3 prob.run_model() self.assertEqual(prob['y'], 7.) def test_values_and_types(self):", "ExplicitComponent import unittest from six import PY3, assertRegex import numpy as np from", "== 'sin': outputs['y'] = np.sin(inputs['x']) else: outputs['y'] = func(inputs['x']) class TestOptionsDictionaryFeature(unittest.TestCase): def test_simple(self):", "type_=FunctionType) def setup(self): self.add_input('x') self.add_output('y') self.declare_partials('y', 'x', method='fd') def compute(self, inputs, outputs): func", "numpy as np from openmdao.devtools.testutil import assert_rel_error class VectorDoublingComp(ExplicitComponent): def initialize(self): self.metadata.declare('size', type_=int)", "prob.run_model() self.assertEqual(prob['y'], 7.) def test_values_and_types(self): from openmdao.api import Problem prob = Problem() prob.model", "test_values_and_types(self): from openmdao.api import Problem prob = Problem() prob.model = UnitaryFunctionComp(func='cos') prob.setup() prob['x']", "Problem prob = Problem() prob.model = UnitaryFunctionComp(func='cos') prob.setup() prob['x'] = 0. 
prob.run_model() self.assertEqual(prob['y'],", "elif func == 'cos': outputs['y'] = np.cos(inputs['x']) elif func == 'sin': outputs['y'] =", "import numpy as np from openmdao.devtools.testutil import assert_rel_error class VectorDoublingComp(ExplicitComponent): def initialize(self): self.metadata.declare('size',", "== 'exp': outputs['y'] = np.exp(inputs['x']) elif func == 'cos': outputs['y'] = np.cos(inputs['x']) elif", "LinearCombinationComp(ExplicitComponent): def initialize(self): self.metadata.declare('a', default=1., type_=(int, float)) self.metadata.declare('b', default=1., type_=(int, float)) def setup(self):", "self.metadata['b'] class UnitaryFunctionComp(ExplicitComponent): def initialize(self): from types import FunctionType self.metadata.declare('func', values=('exp', 'cos', 'sin'),", "from types import FunctionType self.metadata.declare('func', values=('exp', 'cos', 'sin'), type_=FunctionType) def setup(self): self.add_input('x') self.add_output('y')", "self.declare_partials('y', 'x', method='fd') def compute(self, inputs, outputs): func = self.metadata['func'] if func ==", "VectorDoublingComp(size=3) prob.setup() prob['x'] = [1., 2., 3.] prob.run_model() assert_rel_error(self, prob['y'], [2., 4., 6.])", "prob = Problem() prob.model = UnitaryFunctionComp(func='cos') prob.setup() prob['x'] = 0. prob.run_model() self.assertEqual(prob['y'], 1.)", "2 + 2 prob = Problem() prob.model = UnitaryFunctionComp(func=myfunc) prob.setup() prob['x'] = 2.", "'exp': outputs['y'] = np.exp(inputs['x']) elif func == 'cos': outputs['y'] = np.cos(inputs['x']) elif func", "np.exp(inputs['x']) elif func == 'cos': outputs['y'] = np.cos(inputs['x']) elif func == 'sin': outputs['y']", "2 prob = Problem() prob.model = UnitaryFunctionComp(func=myfunc) prob.setup() prob['x'] = 2. prob.run_model() self.assertEqual(prob['y'],", "= UnitaryFunctionComp(func=myfunc) prob.setup() prob['x'] = 2. prob.run_model() self.assertEqual(prob['y'], 6.) 
if __name__ == \"__main__\":", "class VectorDoublingComp(ExplicitComponent): def initialize(self): self.metadata.declare('size', type_=int) def setup(self): size = self.metadata['size'] self.add_input('x', shape=size)", "prob.model = LinearCombinationComp(a=2.) prob.setup() prob['x'] = 3 prob.run_model() self.assertEqual(prob['y'], 7.) def test_values_and_types(self): from", "import Problem prob = Problem() prob.model = VectorDoublingComp(size=3) prob.setup() prob['x'] = [1., 2.,", "'x', val=2., rows=np.arange(size), cols=np.arange(size)) def compute(self, inputs, outputs): outputs['y'] = 2 * inputs['x']", "types import FunctionType self.metadata.declare('func', values=('exp', 'cos', 'sin'), type_=FunctionType) def setup(self): self.add_input('x') self.add_output('y') self.declare_partials('y',", "import Problem prob = Problem() prob.model = UnitaryFunctionComp(func='cos') prob.setup() prob['x'] = 0. prob.run_model()", "type_=int) def setup(self): size = self.metadata['size'] self.add_input('x', shape=size) self.add_output('y', shape=size) self.declare_partials('y', 'x', val=2.,", "shape=size) self.add_output('y', shape=size) self.declare_partials('y', 'x', val=2., rows=np.arange(size), cols=np.arange(size)) def compute(self, inputs, outputs): outputs['y']", "cols=np.arange(size)) def compute(self, inputs, outputs): outputs['y'] = 2 * inputs['x'] class LinearCombinationComp(ExplicitComponent): def", "import PY3, assertRegex import numpy as np from openmdao.devtools.testutil import assert_rel_error class VectorDoublingComp(ExplicitComponent):", "VectorDoublingComp(ExplicitComponent): def initialize(self): self.metadata.declare('size', type_=int) def setup(self): size = self.metadata['size'] self.add_input('x', shape=size) self.add_output('y',", "self.add_output('y', shape=size) self.declare_partials('y', 'x', val=2., rows=np.arange(size), cols=np.arange(size)) def compute(self, inputs, outputs): outputs['y'] =", "setup(self): self.add_input('x') self.add_output('y') 
self.declare_partials('y', 'x', val=self.metadata['a']) def compute(self, inputs, outputs): outputs['y'] = self.metadata['a']", "self.add_output('y') self.declare_partials('y', 'x', val=self.metadata['a']) def compute(self, inputs, outputs): outputs['y'] = self.metadata['a'] * inputs['x']", "inputs['x'] + self.metadata['b'] class UnitaryFunctionComp(ExplicitComponent): def initialize(self): from types import FunctionType self.metadata.declare('func', values=('exp',", "= Problem() prob.model = VectorDoublingComp(size=3) prob.setup() prob['x'] = [1., 2., 3.] prob.run_model() assert_rel_error(self,", "self.assertEqual(prob['y'], 7.) def test_values_and_types(self): from openmdao.api import Problem prob = Problem() prob.model =", "if func == 'exp': outputs['y'] = np.exp(inputs['x']) elif func == 'cos': outputs['y'] =", "= [1., 2., 3.] prob.run_model() assert_rel_error(self, prob['y'], [2., 4., 6.]) def test_with_default(self): from", "Problem() prob.model = UnitaryFunctionComp(func=myfunc) prob.setup() prob['x'] = 2. prob.run_model() self.assertEqual(prob['y'], 6.) if __name__", "prob['x'] = 3 prob.run_model() self.assertEqual(prob['y'], 7.) def test_values_and_types(self): from openmdao.api import Problem prob", "= VectorDoublingComp(size=3) prob.setup() prob['x'] = [1., 2., 3.] prob.run_model() assert_rel_error(self, prob['y'], [2., 4.,", "initialize(self): self.metadata.declare('size', type_=int) def setup(self): size = self.metadata['size'] self.add_input('x', shape=size) self.add_output('y', shape=size) self.declare_partials('y',", "'cos': outputs['y'] = np.cos(inputs['x']) elif func == 'sin': outputs['y'] = np.sin(inputs['x']) else: outputs['y']", "setup(self): size = self.metadata['size'] self.add_input('x', shape=size) self.add_output('y', shape=size) self.declare_partials('y', 'x', val=2., rows=np.arange(size), cols=np.arange(size))", "= LinearCombinationComp(a=2.) prob.setup() prob['x'] = 3 prob.run_model() self.assertEqual(prob['y'], 7.) 
def test_values_and_types(self): from openmdao.api", "def setup(self): self.add_input('x') self.add_output('y') self.declare_partials('y', 'x', method='fd') def compute(self, inputs, outputs): func =", "PY3, assertRegex import numpy as np from openmdao.devtools.testutil import assert_rel_error class VectorDoublingComp(ExplicitComponent): def", "<filename>openmdao/utils/tests/test_options_dictionary_feature.py from openmdao.api import OptionsDictionary, ExplicitComponent import unittest from six import PY3, assertRegex", "outputs['y'] = func(inputs['x']) class TestOptionsDictionaryFeature(unittest.TestCase): def test_simple(self): from openmdao.api import Problem prob =", "7.) def test_values_and_types(self): from openmdao.api import Problem prob = Problem() prob.model = UnitaryFunctionComp(func='cos')", "prob.run_model() self.assertEqual(prob['y'], 1.) def myfunc(x): return x ** 2 + 2 prob =", "return x ** 2 + 2 prob = Problem() prob.model = UnitaryFunctionComp(func=myfunc) prob.setup()", "self.metadata['func'] if func == 'exp': outputs['y'] = np.exp(inputs['x']) elif func == 'cos': outputs['y']", "= UnitaryFunctionComp(func='cos') prob.setup() prob['x'] = 0. prob.run_model() self.assertEqual(prob['y'], 1.) def myfunc(x): return x", "type_=(int, float)) def setup(self): self.add_input('x') self.add_output('y') self.declare_partials('y', 'x', val=self.metadata['a']) def compute(self, inputs, outputs):", "'cos', 'sin'), type_=FunctionType) def setup(self): self.add_input('x') self.add_output('y') self.declare_partials('y', 'x', method='fd') def compute(self, inputs,", "six import PY3, assertRegex import numpy as np from openmdao.devtools.testutil import assert_rel_error class", "x ** 2 + 2 prob = Problem() prob.model = UnitaryFunctionComp(func=myfunc) prob.setup() prob['x']", "prob.setup() prob['x'] = 3 prob.run_model() self.assertEqual(prob['y'], 7.) 
def test_values_and_types(self): from openmdao.api import Problem", "* inputs['x'] + self.metadata['b'] class UnitaryFunctionComp(ExplicitComponent): def initialize(self): from types import FunctionType self.metadata.declare('func',", "prob.model = UnitaryFunctionComp(func='cos') prob.setup() prob['x'] = 0. prob.run_model() self.assertEqual(prob['y'], 1.) def myfunc(x): return", "import unittest from six import PY3, assertRegex import numpy as np from openmdao.devtools.testutil", "outputs['y'] = np.sin(inputs['x']) else: outputs['y'] = func(inputs['x']) class TestOptionsDictionaryFeature(unittest.TestCase): def test_simple(self): from openmdao.api", "= 0. prob.run_model() self.assertEqual(prob['y'], 1.) def myfunc(x): return x ** 2 + 2", "prob['x'] = 0. prob.run_model() self.assertEqual(prob['y'], 1.) def myfunc(x): return x ** 2 +", "def compute(self, inputs, outputs): outputs['y'] = self.metadata['a'] * inputs['x'] + self.metadata['b'] class UnitaryFunctionComp(ExplicitComponent):", "= self.metadata['a'] * inputs['x'] + self.metadata['b'] class UnitaryFunctionComp(ExplicitComponent): def initialize(self): from types import", "2., 3.] prob.run_model() assert_rel_error(self, prob['y'], [2., 4., 6.]) def test_with_default(self): from openmdao.api import", "0. prob.run_model() self.assertEqual(prob['y'], 1.) def myfunc(x): return x ** 2 + 2 prob", "shape=size) self.declare_partials('y', 'x', val=2., rows=np.arange(size), cols=np.arange(size)) def compute(self, inputs, outputs): outputs['y'] = 2", "= 3 prob.run_model() self.assertEqual(prob['y'], 7.) 
def test_values_and_types(self): from openmdao.api import Problem prob =", "inputs, outputs): func = self.metadata['func'] if func == 'exp': outputs['y'] = np.exp(inputs['x']) elif", "val=self.metadata['a']) def compute(self, inputs, outputs): outputs['y'] = self.metadata['a'] * inputs['x'] + self.metadata['b'] class", "def compute(self, inputs, outputs): outputs['y'] = 2 * inputs['x'] class LinearCombinationComp(ExplicitComponent): def initialize(self):", "= self.metadata['func'] if func == 'exp': outputs['y'] = np.exp(inputs['x']) elif func == 'cos':", "outputs['y'] = np.cos(inputs['x']) elif func == 'sin': outputs['y'] = np.sin(inputs['x']) else: outputs['y'] =", "LinearCombinationComp(a=2.) prob.setup() prob['x'] = 3 prob.run_model() self.assertEqual(prob['y'], 7.) def test_values_and_types(self): from openmdao.api import", "float)) def setup(self): self.add_input('x') self.add_output('y') self.declare_partials('y', 'x', val=self.metadata['a']) def compute(self, inputs, outputs): outputs['y']", "setup(self): self.add_input('x') self.add_output('y') self.declare_partials('y', 'x', method='fd') def compute(self, inputs, outputs): func = self.metadata['func']", "size = self.metadata['size'] self.add_input('x', shape=size) self.add_output('y', shape=size) self.declare_partials('y', 'x', val=2., rows=np.arange(size), cols=np.arange(size)) def", "self.declare_partials('y', 'x', val=2., rows=np.arange(size), cols=np.arange(size)) def compute(self, inputs, outputs): outputs['y'] = 2 *", "val=2., rows=np.arange(size), cols=np.arange(size)) def compute(self, inputs, outputs): outputs['y'] = 2 * inputs['x'] class", "self.add_input('x') self.add_output('y') self.declare_partials('y', 'x', method='fd') def compute(self, inputs, outputs): func = self.metadata['func'] if", "+ 2 prob = Problem() prob.model = UnitaryFunctionComp(func=myfunc) prob.setup() prob['x'] = 2. prob.run_model()", "prob.model = UnitaryFunctionComp(func=myfunc) prob.setup() prob['x'] = 2. 
prob.run_model() self.assertEqual(prob['y'], 6.) if __name__ ==", "from openmdao.api import Problem prob = Problem() prob.model = LinearCombinationComp(a=2.) prob.setup() prob['x'] =", "def test_values_and_types(self): from openmdao.api import Problem prob = Problem() prob.model = UnitaryFunctionComp(func='cos') prob.setup()", "FunctionType self.metadata.declare('func', values=('exp', 'cos', 'sin'), type_=FunctionType) def setup(self): self.add_input('x') self.add_output('y') self.declare_partials('y', 'x', method='fd')", "[2., 4., 6.]) def test_with_default(self): from openmdao.api import Problem prob = Problem() prob.model", "UnitaryFunctionComp(func='cos') prob.setup() prob['x'] = 0. prob.run_model() self.assertEqual(prob['y'], 1.) def myfunc(x): return x **", "'sin': outputs['y'] = np.sin(inputs['x']) else: outputs['y'] = func(inputs['x']) class TestOptionsDictionaryFeature(unittest.TestCase): def test_simple(self): from", "compute(self, inputs, outputs): outputs['y'] = self.metadata['a'] * inputs['x'] + self.metadata['b'] class UnitaryFunctionComp(ExplicitComponent): def", "outputs): outputs['y'] = 2 * inputs['x'] class LinearCombinationComp(ExplicitComponent): def initialize(self): self.metadata.declare('a', default=1., type_=(int,", "self.add_input('x', shape=size) self.add_output('y', shape=size) self.declare_partials('y', 'x', val=2., rows=np.arange(size), cols=np.arange(size)) def compute(self, inputs, outputs):", "def setup(self): size = self.metadata['size'] self.add_input('x', shape=size) self.add_output('y', shape=size) self.declare_partials('y', 'x', val=2., rows=np.arange(size),", "func(inputs['x']) class TestOptionsDictionaryFeature(unittest.TestCase): def test_simple(self): from openmdao.api import Problem prob = Problem() prob.model", "UnitaryFunctionComp(func=myfunc) prob.setup() prob['x'] = 2. prob.run_model() self.assertEqual(prob['y'], 6.) 
if __name__ == \"__main__\": unittest.main()", "assert_rel_error(self, prob['y'], [2., 4., 6.]) def test_with_default(self): from openmdao.api import Problem prob =", "initialize(self): from types import FunctionType self.metadata.declare('func', values=('exp', 'cos', 'sin'), type_=FunctionType) def setup(self): self.add_input('x')", "openmdao.api import OptionsDictionary, ExplicitComponent import unittest from six import PY3, assertRegex import numpy", "outputs['y'] = 2 * inputs['x'] class LinearCombinationComp(ExplicitComponent): def initialize(self): self.metadata.declare('a', default=1., type_=(int, float))", "inputs, outputs): outputs['y'] = self.metadata['a'] * inputs['x'] + self.metadata['b'] class UnitaryFunctionComp(ExplicitComponent): def initialize(self):", "import FunctionType self.metadata.declare('func', values=('exp', 'cos', 'sin'), type_=FunctionType) def setup(self): self.add_input('x') self.add_output('y') self.declare_partials('y', 'x',", "assert_rel_error class VectorDoublingComp(ExplicitComponent): def initialize(self): self.metadata.declare('size', type_=int) def setup(self): size = self.metadata['size'] self.add_input('x',", "default=1., type_=(int, float)) self.metadata.declare('b', default=1., type_=(int, float)) def setup(self): self.add_input('x') self.add_output('y') self.declare_partials('y', 'x',", "6.]) def test_with_default(self): from openmdao.api import Problem prob = Problem() prob.model = LinearCombinationComp(a=2.)", "def setup(self): self.add_input('x') self.add_output('y') self.declare_partials('y', 'x', val=self.metadata['a']) def compute(self, inputs, outputs): outputs['y'] =", "UnitaryFunctionComp(ExplicitComponent): def initialize(self): from types import FunctionType self.metadata.declare('func', values=('exp', 'cos', 'sin'), type_=FunctionType) def", "2 * inputs['x'] class LinearCombinationComp(ExplicitComponent): def initialize(self): self.metadata.declare('a', default=1., type_=(int, float)) self.metadata.declare('b', 
default=1.,", "def test_simple(self): from openmdao.api import Problem prob = Problem() prob.model = VectorDoublingComp(size=3) prob.setup()", "'sin'), type_=FunctionType) def setup(self): self.add_input('x') self.add_output('y') self.declare_partials('y', 'x', method='fd') def compute(self, inputs, outputs):", "prob['x'] = [1., 2., 3.] prob.run_model() assert_rel_error(self, prob['y'], [2., 4., 6.]) def test_with_default(self):", "inputs['x'] class LinearCombinationComp(ExplicitComponent): def initialize(self): self.metadata.declare('a', default=1., type_=(int, float)) self.metadata.declare('b', default=1., type_=(int, float))", "4., 6.]) def test_with_default(self): from openmdao.api import Problem prob = Problem() prob.model =", "self.metadata.declare('a', default=1., type_=(int, float)) self.metadata.declare('b', default=1., type_=(int, float)) def setup(self): self.add_input('x') self.add_output('y') self.declare_partials('y',", "self.add_input('x') self.add_output('y') self.declare_partials('y', 'x', val=self.metadata['a']) def compute(self, inputs, outputs): outputs['y'] = self.metadata['a'] *", "TestOptionsDictionaryFeature(unittest.TestCase): def test_simple(self): from openmdao.api import Problem prob = Problem() prob.model = VectorDoublingComp(size=3)", "test_with_default(self): from openmdao.api import Problem prob = Problem() prob.model = LinearCombinationComp(a=2.) prob.setup() prob['x']", "self.add_output('y') self.declare_partials('y', 'x', method='fd') def compute(self, inputs, outputs): func = self.metadata['func'] if func", "def initialize(self): self.metadata.declare('size', type_=int) def setup(self): size = self.metadata['size'] self.add_input('x', shape=size) self.add_output('y', shape=size)", "func == 'sin': outputs['y'] = np.sin(inputs['x']) else: outputs['y'] = func(inputs['x']) class TestOptionsDictionaryFeature(unittest.TestCase): def", "Problem() prob.model = UnitaryFunctionComp(func='cos') prob.setup() prob['x'] = 0. 
prob.run_model() self.assertEqual(prob['y'], 1.) def myfunc(x):", "compute(self, inputs, outputs): func = self.metadata['func'] if func == 'exp': outputs['y'] = np.exp(inputs['x'])", "def compute(self, inputs, outputs): func = self.metadata['func'] if func == 'exp': outputs['y'] =", "import OptionsDictionary, ExplicitComponent import unittest from six import PY3, assertRegex import numpy as", "self.assertEqual(prob['y'], 1.) def myfunc(x): return x ** 2 + 2 prob = Problem()", "type_=(int, float)) self.metadata.declare('b', default=1., type_=(int, float)) def setup(self): self.add_input('x') self.add_output('y') self.declare_partials('y', 'x', val=self.metadata['a'])", "np from openmdao.devtools.testutil import assert_rel_error class VectorDoublingComp(ExplicitComponent): def initialize(self): self.metadata.declare('size', type_=int) def setup(self):", "outputs['y'] = self.metadata['a'] * inputs['x'] + self.metadata['b'] class UnitaryFunctionComp(ExplicitComponent): def initialize(self): from types", "as np from openmdao.devtools.testutil import assert_rel_error class VectorDoublingComp(ExplicitComponent): def initialize(self): self.metadata.declare('size', type_=int) def", "from six import PY3, assertRegex import numpy as np from openmdao.devtools.testutil import assert_rel_error", "= Problem() prob.model = UnitaryFunctionComp(func=myfunc) prob.setup() prob['x'] = 2. prob.run_model() self.assertEqual(prob['y'], 6.) 
if", "openmdao.devtools.testutil import assert_rel_error class VectorDoublingComp(ExplicitComponent): def initialize(self): self.metadata.declare('size', type_=int) def setup(self): size =", "= np.sin(inputs['x']) else: outputs['y'] = func(inputs['x']) class TestOptionsDictionaryFeature(unittest.TestCase): def test_simple(self): from openmdao.api import", "'x', val=self.metadata['a']) def compute(self, inputs, outputs): outputs['y'] = self.metadata['a'] * inputs['x'] + self.metadata['b']", "def initialize(self): from types import FunctionType self.metadata.declare('func', values=('exp', 'cos', 'sin'), type_=FunctionType) def setup(self):", "from openmdao.api import OptionsDictionary, ExplicitComponent import unittest from six import PY3, assertRegex import", "rows=np.arange(size), cols=np.arange(size)) def compute(self, inputs, outputs): outputs['y'] = 2 * inputs['x'] class LinearCombinationComp(ExplicitComponent):", "** 2 + 2 prob = Problem() prob.model = UnitaryFunctionComp(func=myfunc) prob.setup() prob['x'] =", "assertRegex import numpy as np from openmdao.devtools.testutil import assert_rel_error class VectorDoublingComp(ExplicitComponent): def initialize(self):", "class UnitaryFunctionComp(ExplicitComponent): def initialize(self): from types import FunctionType self.metadata.declare('func', values=('exp', 'cos', 'sin'), type_=FunctionType)", "method='fd') def compute(self, inputs, outputs): func = self.metadata['func'] if func == 'exp': outputs['y']", "values=('exp', 'cos', 'sin'), type_=FunctionType) def setup(self): self.add_input('x') self.add_output('y') self.declare_partials('y', 'x', method='fd') def compute(self,", "def test_with_default(self): from openmdao.api import Problem prob = Problem() prob.model = LinearCombinationComp(a=2.) prob.setup()", "Problem prob = Problem() prob.model = LinearCombinationComp(a=2.) prob.setup() prob['x'] = 3 prob.run_model() self.assertEqual(prob['y']," ]
[ "metrics.set_meter_provider(MeterProvider(stateful=False)) # Track telemetry from the requests library RequestsInstrumentor().instrument() meter = RequestsInstrumentor().meter exporter", "All rights reserved. # Licensed under the MIT License. # pylint: disable=import-error #", "RequestsInstrumentor from opentelemetry.sdk.metrics import MeterProvider from azure_monitor import AzureMonitorMetricsExporter # Use the default", "import RequestsInstrumentor from opentelemetry.sdk.metrics import MeterProvider from azure_monitor import AzureMonitorMetricsExporter # Use the", "MIT License. # pylint: disable=import-error # pylint: disable=no-member # pylint: disable=no-name-in-module import time", "HERE>\" ) # Export standard metrics from requests library to Azure Monitor metrics.get_meter_provider().start_pipeline(meter,", "metrics from opentelemetry.instrumentation.requests import RequestsInstrumentor from opentelemetry.sdk.metrics import MeterProvider from azure_monitor import AzureMonitorMetricsExporter", "Corporation. All rights reserved. # Licensed under the MIT License. # pylint: disable=import-error", "opentelemetry.instrumentation.requests import RequestsInstrumentor from opentelemetry.sdk.metrics import MeterProvider from azure_monitor import AzureMonitorMetricsExporter # Use", "# Licensed under the MIT License. # pylint: disable=import-error # pylint: disable=no-member #", "for x in range(10): for y in range(10): requests.get(\"http://example.com\") time.sleep(2) time.sleep(5) input(\"Press any", "metrics from requests library to Azure Monitor metrics.get_meter_provider().start_pipeline(meter, exporter, 5) for x in", "from azure_monitor import AzureMonitorMetricsExporter # Use the default sdk implementation metrics.set_meter_provider(MeterProvider(stateful=False)) # Track", "Azure Monitor metrics.get_meter_provider().start_pipeline(meter, exporter, 5) for x in range(10): for y in range(10):", "License. 
# pylint: disable=import-error # pylint: disable=no-member # pylint: disable=no-name-in-module import time import", ") # Export standard metrics from requests library to Azure Monitor metrics.get_meter_provider().start_pipeline(meter, exporter,", "Monitor metrics.get_meter_provider().start_pipeline(meter, exporter, 5) for x in range(10): for y in range(10): requests.get(\"http://example.com\")", "RequestsInstrumentor().instrument() meter = RequestsInstrumentor().meter exporter = AzureMonitorMetricsExporter( connection_string=\"InstrumentationKey=<INSTRUMENTATION KEY HERE>\" ) # Export", "opentelemetry import metrics from opentelemetry.instrumentation.requests import RequestsInstrumentor from opentelemetry.sdk.metrics import MeterProvider from azure_monitor", "from opentelemetry.sdk.metrics import MeterProvider from azure_monitor import AzureMonitorMetricsExporter # Use the default sdk", "disable=no-name-in-module import time import requests from opentelemetry import metrics from opentelemetry.instrumentation.requests import RequestsInstrumentor", "from the requests library RequestsInstrumentor().instrument() meter = RequestsInstrumentor().meter exporter = AzureMonitorMetricsExporter( connection_string=\"InstrumentationKey=<INSTRUMENTATION KEY", "the MIT License. 
# pylint: disable=import-error # pylint: disable=no-member # pylint: disable=no-name-in-module import", "from requests library to Azure Monitor metrics.get_meter_provider().start_pipeline(meter, exporter, 5) for x in range(10):", "azure_monitor import AzureMonitorMetricsExporter # Use the default sdk implementation metrics.set_meter_provider(MeterProvider(stateful=False)) # Track telemetry", "standard metrics from requests library to Azure Monitor metrics.get_meter_provider().start_pipeline(meter, exporter, 5) for x", "metrics.get_meter_provider().start_pipeline(meter, exporter, 5) for x in range(10): for y in range(10): requests.get(\"http://example.com\") time.sleep(2)", "implementation metrics.set_meter_provider(MeterProvider(stateful=False)) # Track telemetry from the requests library RequestsInstrumentor().instrument() meter = RequestsInstrumentor().meter", "KEY HERE>\" ) # Export standard metrics from requests library to Azure Monitor", "Use the default sdk implementation metrics.set_meter_provider(MeterProvider(stateful=False)) # Track telemetry from the requests library", "telemetry from the requests library RequestsInstrumentor().instrument() meter = RequestsInstrumentor().meter exporter = AzureMonitorMetricsExporter( connection_string=\"InstrumentationKey=<INSTRUMENTATION", "opentelemetry.sdk.metrics import MeterProvider from azure_monitor import AzureMonitorMetricsExporter # Use the default sdk implementation", "# Export standard metrics from requests library to Azure Monitor metrics.get_meter_provider().start_pipeline(meter, exporter, 5)", "requests library RequestsInstrumentor().instrument() meter = RequestsInstrumentor().meter exporter = AzureMonitorMetricsExporter( connection_string=\"InstrumentationKey=<INSTRUMENTATION KEY HERE>\" )", "# Use the default sdk implementation metrics.set_meter_provider(MeterProvider(stateful=False)) # Track telemetry from the requests", "exporter, 5) for x in range(10): for y in range(10): 
requests.get(\"http://example.com\") time.sleep(2) time.sleep(5)", "disable=no-member # pylint: disable=no-name-in-module import time import requests from opentelemetry import metrics from", "from opentelemetry import metrics from opentelemetry.instrumentation.requests import RequestsInstrumentor from opentelemetry.sdk.metrics import MeterProvider from", "reserved. # Licensed under the MIT License. # pylint: disable=import-error # pylint: disable=no-member", "pylint: disable=no-member # pylint: disable=no-name-in-module import time import requests from opentelemetry import metrics", "default sdk implementation metrics.set_meter_provider(MeterProvider(stateful=False)) # Track telemetry from the requests library RequestsInstrumentor().instrument() meter", "to Azure Monitor metrics.get_meter_provider().start_pipeline(meter, exporter, 5) for x in range(10): for y in", "# pylint: disable=no-name-in-module import time import requests from opentelemetry import metrics from opentelemetry.instrumentation.requests", "library to Azure Monitor metrics.get_meter_provider().start_pipeline(meter, exporter, 5) for x in range(10): for y", "MeterProvider from azure_monitor import AzureMonitorMetricsExporter # Use the default sdk implementation metrics.set_meter_provider(MeterProvider(stateful=False)) #", "Licensed under the MIT License. 
# pylint: disable=import-error # pylint: disable=no-member # pylint:", "RequestsInstrumentor().meter exporter = AzureMonitorMetricsExporter( connection_string=\"InstrumentationKey=<INSTRUMENTATION KEY HERE>\" ) # Export standard metrics from", "5) for x in range(10): for y in range(10): requests.get(\"http://example.com\") time.sleep(2) time.sleep(5) input(\"Press", "import AzureMonitorMetricsExporter # Use the default sdk implementation metrics.set_meter_provider(MeterProvider(stateful=False)) # Track telemetry from", "library RequestsInstrumentor().instrument() meter = RequestsInstrumentor().meter exporter = AzureMonitorMetricsExporter( connection_string=\"InstrumentationKey=<INSTRUMENTATION KEY HERE>\" ) #", "from opentelemetry.instrumentation.requests import RequestsInstrumentor from opentelemetry.sdk.metrics import MeterProvider from azure_monitor import AzureMonitorMetricsExporter #", "AzureMonitorMetricsExporter( connection_string=\"InstrumentationKey=<INSTRUMENTATION KEY HERE>\" ) # Export standard metrics from requests library to", "AzureMonitorMetricsExporter # Use the default sdk implementation metrics.set_meter_provider(MeterProvider(stateful=False)) # Track telemetry from the", "disable=import-error # pylint: disable=no-member # pylint: disable=no-name-in-module import time import requests from opentelemetry", "pylint: disable=no-name-in-module import time import requests from opentelemetry import metrics from opentelemetry.instrumentation.requests import", "x in range(10): for y in range(10): requests.get(\"http://example.com\") time.sleep(2) time.sleep(5) input(\"Press any key", "= RequestsInstrumentor().meter exporter = AzureMonitorMetricsExporter( connection_string=\"InstrumentationKey=<INSTRUMENTATION KEY HERE>\" ) # Export standard metrics", "import time import requests from opentelemetry import metrics from opentelemetry.instrumentation.requests import RequestsInstrumentor from", "sdk implementation 
metrics.set_meter_provider(MeterProvider(stateful=False)) # Track telemetry from the requests library RequestsInstrumentor().instrument() meter =", "Microsoft Corporation. All rights reserved. # Licensed under the MIT License. # pylint:", "exporter = AzureMonitorMetricsExporter( connection_string=\"InstrumentationKey=<INSTRUMENTATION KEY HERE>\" ) # Export standard metrics from requests", "import requests from opentelemetry import metrics from opentelemetry.instrumentation.requests import RequestsInstrumentor from opentelemetry.sdk.metrics import", "meter = RequestsInstrumentor().meter exporter = AzureMonitorMetricsExporter( connection_string=\"InstrumentationKey=<INSTRUMENTATION KEY HERE>\" ) # Export standard", "# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT", "= AzureMonitorMetricsExporter( connection_string=\"InstrumentationKey=<INSTRUMENTATION KEY HERE>\" ) # Export standard metrics from requests library", "pylint: disable=import-error # pylint: disable=no-member # pylint: disable=no-name-in-module import time import requests from", "time import requests from opentelemetry import metrics from opentelemetry.instrumentation.requests import RequestsInstrumentor from opentelemetry.sdk.metrics", "the requests library RequestsInstrumentor().instrument() meter = RequestsInstrumentor().meter exporter = AzureMonitorMetricsExporter( connection_string=\"InstrumentationKey=<INSTRUMENTATION KEY HERE>\"", "import metrics from opentelemetry.instrumentation.requests import RequestsInstrumentor from opentelemetry.sdk.metrics import MeterProvider from azure_monitor import", "import MeterProvider from azure_monitor import AzureMonitorMetricsExporter # Use the default sdk implementation metrics.set_meter_provider(MeterProvider(stateful=False))", "requests library to Azure Monitor metrics.get_meter_provider().start_pipeline(meter, exporter, 5) for x in range(10): for", "Track telemetry from the requests library 
RequestsInstrumentor().instrument() meter = RequestsInstrumentor().meter exporter = AzureMonitorMetricsExporter(", "(c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. #", "Export standard metrics from requests library to Azure Monitor metrics.get_meter_provider().start_pipeline(meter, exporter, 5) for", "# Track telemetry from the requests library RequestsInstrumentor().instrument() meter = RequestsInstrumentor().meter exporter =", "in range(10): for y in range(10): requests.get(\"http://example.com\") time.sleep(2) time.sleep(5) input(\"Press any key to", "requests from opentelemetry import metrics from opentelemetry.instrumentation.requests import RequestsInstrumentor from opentelemetry.sdk.metrics import MeterProvider", "# pylint: disable=no-member # pylint: disable=no-name-in-module import time import requests from opentelemetry import", "rights reserved. # Licensed under the MIT License. # pylint: disable=import-error # pylint:", "under the MIT License. # pylint: disable=import-error # pylint: disable=no-member # pylint: disable=no-name-in-module", "the default sdk implementation metrics.set_meter_provider(MeterProvider(stateful=False)) # Track telemetry from the requests library RequestsInstrumentor().instrument()", "connection_string=\"InstrumentationKey=<INSTRUMENTATION KEY HERE>\" ) # Export standard metrics from requests library to Azure", "Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License.", "range(10): for y in range(10): requests.get(\"http://example.com\") time.sleep(2) time.sleep(5) input(\"Press any key to exit...\")", "# pylint: disable=import-error # pylint: disable=no-member # pylint: disable=no-name-in-module import time import requests" ]
[ "= get_current_commit() commit_applied = get_git_logs(last_commit_id, current_commit_id) if commit_applied: commit_applied = \"••• \" +", "@task def deploy(): try: target_host = env.hosts[0] except IndexError: target_host = 'dev' with", "error\".format(env.hosts[0])) raise SystemExit() else: return result def do_deploy(): if not exists(\"{}/{}\".format(HOST_API[env.hosts[0]]['dir'], LAST_CID_FILE)): save_last_commit()", "get_git_logs(last_commit_id, current_commit_id) if commit_applied: commit_applied = \"••• \" + commit_applied commit_applied = commit_applied.replace(\"\\n\",", "deploy(): try: target_host = env.hosts[0] except IndexError: target_host = 'dev' with cd(HOST_API[target_host]['dir']): do_deploy()", "restart_api(): pass def get_current_commit(): return run_cmd(\"git rev-parse HEAD\") def save_last_commit(): run_cmd(\"git rev-parse HEAD", "restart_api() send_commit_applied() save_last_commit() def run_testing(): pass def restart_api(): pass def get_current_commit(): return run_cmd(\"git", "username='Deployment', # as_user=True, icon_emoji=\":gear:\", **kargs ) except Exception: traceback.print_exc() sc = FabSlack() @task", "cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] class FabSlack(metaclass=Singleton): sc = SlackClient(SLACK_API_KEY)", "'Detail: {}'.format(result), \"mrkdwn_in\": [\"text\", \"pretext\"] }] sc.send(attachments=attachments, text=\"Deploy to *{}* error\".format(env.hosts[0])) raise SystemExit()", "def send_commit_applied(): last_commit_id = get_last_commit() current_commit_id = get_current_commit() commit_applied = get_git_logs(last_commit_id, current_commit_id) if", "attachments = [ { \"color\": \"good\", \"title\": \"Commit applied:\", \"text\": commit_applied, }, ]", "{}\".format(LAST_CID_FILE)) def get_last_commit(): return run_cmd(\"cat {}\".format(LAST_CID_FILE)) def get_git_logs(last_commit_id, current_commit_id): return run_cmd(\"git log {}...{}", 
"FabSlack(metaclass=Singleton): sc = SlackClient(SLACK_API_KEY) def send(self, **kargs): try: self.sc.api_call( \"chat.postMessage\", channel=\"#log-info\", username='Deployment', #", "return result def do_deploy(): if not exists(\"{}/{}\".format(HOST_API[env.hosts[0]]['dir'], LAST_CID_FILE)): save_last_commit() run_cmd(\"git pull\") run_testing() restart_api()", "send(self, **kargs): try: self.sc.api_call( \"chat.postMessage\", channel=\"#log-info\", username='Deployment', # as_user=True, icon_emoji=\":gear:\", **kargs ) except", "commit_applied = commit_applied.replace(\"\\n\", \"\\n••• \") attachments = [ { \"color\": \"good\", \"title\": \"Commit", "@task def test(target_host): pass @task def set_host(target_host='dev'): \"\"\"Set host before deploy, NOTE: plz", "print output to terminal :return: Output string if capture=True or return nothing if", "fabfile_config import * import traceback from fabric.contrib.files import exists LAST_CID_FILE = \"last_commit_id.txt\" class", "warn_only=True, pty=False) if result.failed: print(result.stdout) attachments = [{ \"title\": 'Command: {}'.format(result.command), \"color\": \"danger\",", "[{ \"title\": 'Command: {}'.format(result.command), \"color\": \"danger\", \"pretext\": 'Detail: {}'.format(result), \"mrkdwn_in\": [\"text\", \"pretext\"] }]", "except Exception: traceback.print_exc() sc = FabSlack() @task def test(target_host): pass @task def set_host(target_host='dev'):", "send_commit_applied() save_last_commit() def run_testing(): pass def restart_api(): pass def get_current_commit(): return run_cmd(\"git rev-parse", "\"pretext\"] }] sc.send(attachments=attachments, text=\"Deploy to *{}* error\".format(env.hosts[0])) raise SystemExit() else: return result def", "\"••• \" + commit_applied commit_applied = commit_applied.replace(\"\\n\", \"\\n••• \") attachments = [ {", "> {}\".format(LAST_CID_FILE)) def get_last_commit(): return run_cmd(\"cat {}\".format(LAST_CID_FILE)) def get_git_logs(last_commit_id, 
current_commit_id): return run_cmd(\"git log", "or remote host name :param bool local_capture: If true then return output and", "{ \"color\": \"good\", \"title\": \"Commit applied:\", \"text\": commit_applied, }, ] sc.send(attachments=attachments, text=\"Deploy to", "try: target_host = env.hosts[0] except IndexError: target_host = 'dev' with cd(HOST_API[target_host]['dir']): do_deploy() def", "cd, env, task, run, settings, local from fabfile_config import * import traceback from", "channel=\"#log-info\", username='Deployment', # as_user=True, icon_emoji=\":gear:\", **kargs ) except Exception: traceback.print_exc() sc = FabSlack()", "def send(self, **kargs): try: self.sc.api_call( \"chat.postMessage\", channel=\"#log-info\", username='Deployment', # as_user=True, icon_emoji=\":gear:\", **kargs )", "string target_host: local or remote host name :param bool local_capture: If true then", "run(cmd, warn_only=True, pty=False) if result.failed: print(result.stdout) attachments = [{ \"title\": 'Command: {}'.format(result.command), \"color\":", "[\"text\", \"pretext\"] }] sc.send(attachments=attachments, text=\"Deploy to *{}* error\".format(env.hosts[0])) raise SystemExit() else: return result", "current_commit_id) if commit_applied: commit_applied = \"••• \" + commit_applied commit_applied = commit_applied.replace(\"\\n\", \"\\n•••", "settings, local from fabfile_config import * import traceback from fabric.contrib.files import exists LAST_CID_FILE", "\"last_commit_id.txt\" class Singleton(type): _instances = {} def __call__(cls, *args, **kwargs): if cls not", "__call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)", "'dev' with cd(HOST_API[target_host]['dir']): do_deploy() def run_cmd(cmd, target_host=None, local_capture=True): \"\"\" Run cmd base on", "SlackClient from fabric.api import cd, env, task, run, settings, local from fabfile_config import", "before deploy, NOTE: plz configure ssh 
config file on your local machine first.", "in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] class FabSlack(metaclass=Singleton): sc =", "then return output and not print anything to terminal, if false then print", "HEAD > {}\".format(LAST_CID_FILE)) def get_last_commit(): return run_cmd(\"cat {}\".format(LAST_CID_FILE)) def get_git_logs(last_commit_id, current_commit_id): return run_cmd(\"git", "env.hosts = [target_host] @task def deploy(): try: target_host = env.hosts[0] except IndexError: target_host", "\"\"\" Fabfile template for python3 \"\"\" # -*- coding: utf-8 -*- from __future__", "if commit_applied: commit_applied = \"••• \" + commit_applied commit_applied = commit_applied.replace(\"\\n\", \"\\n••• \")", "icon_emoji=\":gear:\", **kargs ) except Exception: traceback.print_exc() sc = FabSlack() @task def test(target_host): pass", "'' with settings(warn_only=True): fn = \"local\" if target_host == 'local' else \"run\" if", "to terminal :return: Output string if capture=True or return nothing if capture=false \"\"\"", "terminal screen :param string cmd: Command to run :param string target_host: local or", "save_last_commit(): run_cmd(\"git rev-parse HEAD > {}\".format(LAST_CID_FILE)) def get_last_commit(): return run_cmd(\"cat {}\".format(LAST_CID_FILE)) def get_git_logs(last_commit_id,", "'local': result = local(cmd, local_capture) # Do not print to terminal and get", "run_cmd(\"git log {}...{} --oneline --pretty=format:'%s'\".format(last_commit_id, current_commit_id)) def send_commit_applied(): last_commit_id = get_last_commit() current_commit_id =", "if false then print output to terminal :return: Output string if capture=True or", "else: result = run(cmd, warn_only=True, pty=False) if result.failed: print(result.stdout) attachments = [{ \"title\":", "run :param string target_host: local or remote host name :param bool local_capture: If", "attachments = [{ \"title\": 'Command: 
{}'.format(result.command), \"color\": \"danger\", \"pretext\": 'Detail: {}'.format(result), \"mrkdwn_in\": [\"text\",", "result.failed: print(result.stdout) attachments = [{ \"title\": 'Command: {}'.format(result.command), \"color\": \"danger\", \"pretext\": 'Detail: {}'.format(result),", "env.use_ssh_config = True env.hosts = [target_host] @task def deploy(): try: target_host = env.hosts[0]", "}] sc.send(attachments=attachments, text=\"Deploy to *{}* error\".format(env.hosts[0])) raise SystemExit() else: return result def do_deploy():", "string cmd: Command to run :param string target_host: local or remote host name", "def get_git_logs(last_commit_id, current_commit_id): return run_cmd(\"git log {}...{} --oneline --pretty=format:'%s'\".format(last_commit_id, current_commit_id)) def send_commit_applied(): last_commit_id", "= \"last_commit_id.txt\" class Singleton(type): _instances = {} def __call__(cls, *args, **kwargs): if cls", "from fabric.api import cd, env, task, run, settings, local from fabfile_config import *", "*args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return", "do_deploy(): if not exists(\"{}/{}\".format(HOST_API[env.hosts[0]]['dir'], LAST_CID_FILE)): save_last_commit() run_cmd(\"git pull\") run_testing() restart_api() send_commit_applied() save_last_commit() def", "remote host name :param bool local_capture: If true then return output and not", "\"\"\"Set host before deploy, NOTE: plz configure ssh config file on your local", "pass @task def set_host(target_host='dev'): \"\"\"Set host before deploy, NOTE: plz configure ssh config", "local from fabfile_config import * import traceback from fabric.contrib.files import exists LAST_CID_FILE =", "fabric.api import cd, env, task, run, settings, local from fabfile_config import * import", "= run(cmd, warn_only=True, pty=False) if result.failed: print(result.stdout) attachments = [{ \"title\": 'Command: {}'.format(result.command),", 
"local_capture: If true then return output and not print anything to terminal, if", "or remote host and return output or print output to terminal screen :param", "def set_host(target_host='dev'): \"\"\"Set host before deploy, NOTE: plz configure ssh config file on", "\"title\": 'Command: {}'.format(result.command), \"color\": \"danger\", \"pretext\": 'Detail: {}'.format(result), \"mrkdwn_in\": [\"text\", \"pretext\"] }] sc.send(attachments=attachments,", "Eg use: `fab set_host:dev deploy` :param: target_host string \"\"\" env.use_ssh_config = True env.hosts", "'Command: {}'.format(result.command), \"color\": \"danger\", \"pretext\": 'Detail: {}'.format(result), \"mrkdwn_in\": [\"text\", \"pretext\"] }] sc.send(attachments=attachments, text=\"Deploy", "output else: result = run(cmd, warn_only=True, pty=False) if result.failed: print(result.stdout) attachments = [{", "cd(HOST_API[target_host]['dir']): do_deploy() def run_cmd(cmd, target_host=None, local_capture=True): \"\"\" Run cmd base on local or", "fn = \"local\" if target_host == 'local' else \"run\" if fn == 'local':", "if fn == 'local': result = local(cmd, local_capture) # Do not print to", ":return: Output string if capture=True or return nothing if capture=false \"\"\" result =", "return output or print output to terminal screen :param string cmd: Command to", "print to terminal and get the output else: result = run(cmd, warn_only=True, pty=False)", "SystemExit() else: return result def do_deploy(): if not exists(\"{}/{}\".format(HOST_API[env.hosts[0]]['dir'], LAST_CID_FILE)): save_last_commit() run_cmd(\"git pull\")", "python3 \"\"\" # -*- coding: utf-8 -*- from __future__ import print_function from slackclient", "screen :param string cmd: Command to run :param string target_host: local or remote", "run_cmd(\"cat {}\".format(LAST_CID_FILE)) def get_git_logs(last_commit_id, current_commit_id): return run_cmd(\"git log {}...{} --oneline --pretty=format:'%s'\".format(last_commit_id, current_commit_id)) def", 
"Fabfile template for python3 \"\"\" # -*- coding: utf-8 -*- from __future__ import", "def deploy(): try: target_host = env.hosts[0] except IndexError: target_host = 'dev' with cd(HOST_API[target_host]['dir']):", "-*- coding: utf-8 -*- from __future__ import print_function from slackclient import SlackClient from", "traceback.print_exc() sc = FabSlack() @task def test(target_host): pass @task def set_host(target_host='dev'): \"\"\"Set host", "file on your local machine first. Eg use: `fab set_host:dev deploy` :param: target_host", "output to terminal :return: Output string if capture=True or return nothing if capture=false", "**kargs ) except Exception: traceback.print_exc() sc = FabSlack() @task def test(target_host): pass @task", "class Singleton(type): _instances = {} def __call__(cls, *args, **kwargs): if cls not in", "send_commit_applied(): last_commit_id = get_last_commit() current_commit_id = get_current_commit() commit_applied = get_git_logs(last_commit_id, current_commit_id) if commit_applied:", "If true then return output and not print anything to terminal, if false", "*{}* error\".format(env.hosts[0])) raise SystemExit() else: return result def do_deploy(): if not exists(\"{}/{}\".format(HOST_API[env.hosts[0]]['dir'], LAST_CID_FILE)):", "set_host:dev deploy` :param: target_host string \"\"\" env.use_ssh_config = True env.hosts = [target_host] @task", "--pretty=format:'%s'\".format(last_commit_id, current_commit_id)) def send_commit_applied(): last_commit_id = get_last_commit() current_commit_id = get_current_commit() commit_applied = get_git_logs(last_commit_id,", "**kwargs) return cls._instances[cls] class FabSlack(metaclass=Singleton): sc = SlackClient(SLACK_API_KEY) def send(self, **kargs): try: self.sc.api_call(", "slackclient import SlackClient from fabric.api import cd, env, task, run, settings, local from", "raise SystemExit() else: return result def do_deploy(): if not exists(\"{}/{}\".format(HOST_API[env.hosts[0]]['dir'], LAST_CID_FILE)): 
save_last_commit() run_cmd(\"git", "def run_testing(): pass def restart_api(): pass def get_current_commit(): return run_cmd(\"git rev-parse HEAD\") def", "plz configure ssh config file on your local machine first. Eg use: `fab", "target_host = 'dev' with cd(HOST_API[target_host]['dir']): do_deploy() def run_cmd(cmd, target_host=None, local_capture=True): \"\"\" Run cmd", "**kwargs): if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls]", "= [{ \"title\": 'Command: {}'.format(result.command), \"color\": \"danger\", \"pretext\": 'Detail: {}'.format(result), \"mrkdwn_in\": [\"text\", \"pretext\"]", "base on local or remote host and return output or print output to", "{}\".format(LAST_CID_FILE)) def get_git_logs(last_commit_id, current_commit_id): return run_cmd(\"git log {}...{} --oneline --pretty=format:'%s'\".format(last_commit_id, current_commit_id)) def send_commit_applied():", "run_testing(): pass def restart_api(): pass def get_current_commit(): return run_cmd(\"git rev-parse HEAD\") def save_last_commit():", "get_git_logs(last_commit_id, current_commit_id): return run_cmd(\"git log {}...{} --oneline --pretty=format:'%s'\".format(last_commit_id, current_commit_id)) def send_commit_applied(): last_commit_id =", "\"\"\" env.use_ssh_config = True env.hosts = [target_host] @task def deploy(): try: target_host =", "FabSlack() @task def test(target_host): pass @task def set_host(target_host='dev'): \"\"\"Set host before deploy, NOTE:", "IndexError: target_host = 'dev' with cd(HOST_API[target_host]['dir']): do_deploy() def run_cmd(cmd, target_host=None, local_capture=True): \"\"\" Run", "-*- from __future__ import print_function from slackclient import SlackClient from fabric.api import cd,", "= True env.hosts = [target_host] @task def deploy(): try: target_host = env.hosts[0] except", "name :param bool local_capture: If true then return output and not print anything", "fn == 'local': result = 
local(cmd, local_capture) # Do not print to terminal", "set_host(target_host='dev'): \"\"\"Set host before deploy, NOTE: plz configure ssh config file on your", "return nothing if capture=false \"\"\" result = '' with settings(warn_only=True): fn = \"local\"", "host name :param bool local_capture: If true then return output and not print", "deploy` :param: target_host string \"\"\" env.use_ssh_config = True env.hosts = [target_host] @task def", "output to terminal screen :param string cmd: Command to run :param string target_host:", "true then return output and not print anything to terminal, if false then", "Do not print to terminal and get the output else: result = run(cmd,", "coding: utf-8 -*- from __future__ import print_function from slackclient import SlackClient from fabric.api", "= '' with settings(warn_only=True): fn = \"local\" if target_host == 'local' else \"run\"", "\"chat.postMessage\", channel=\"#log-info\", username='Deployment', # as_user=True, icon_emoji=\":gear:\", **kargs ) except Exception: traceback.print_exc() sc =", "if capture=True or return nothing if capture=false \"\"\" result = '' with settings(warn_only=True):", "return output and not print anything to terminal, if false then print output", "cls).__call__(*args, **kwargs) return cls._instances[cls] class FabSlack(metaclass=Singleton): sc = SlackClient(SLACK_API_KEY) def send(self, **kargs): try:", "test(target_host): pass @task def set_host(target_host='dev'): \"\"\"Set host before deploy, NOTE: plz configure ssh", "= {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] =", ":param: target_host string \"\"\" env.use_ssh_config = True env.hosts = [target_host] @task def deploy():", "else \"run\" if fn == 'local': result = local(cmd, local_capture) # Do not", "\" + commit_applied commit_applied = commit_applied.replace(\"\\n\", \"\\n••• \") attachments = [ { \"color\":", "exists LAST_CID_FILE = \"last_commit_id.txt\" class Singleton(type): _instances = 
{} def __call__(cls, *args, **kwargs):", "commit_applied.replace(\"\\n\", \"\\n••• \") attachments = [ { \"color\": \"good\", \"title\": \"Commit applied:\", \"text\":", "to terminal and get the output else: result = run(cmd, warn_only=True, pty=False) if", "run, settings, local from fabfile_config import * import traceback from fabric.contrib.files import exists", "terminal, if false then print output to terminal :return: Output string if capture=True", "cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] class FabSlack(metaclass=Singleton):", "except IndexError: target_host = 'dev' with cd(HOST_API[target_host]['dir']): do_deploy() def run_cmd(cmd, target_host=None, local_capture=True): \"\"\"", "terminal :return: Output string if capture=True or return nothing if capture=false \"\"\" result", "\"run\" if fn == 'local': result = local(cmd, local_capture) # Do not print", "if capture=false \"\"\" result = '' with settings(warn_only=True): fn = \"local\" if target_host", "= FabSlack() @task def test(target_host): pass @task def set_host(target_host='dev'): \"\"\"Set host before deploy,", "True env.hosts = [target_host] @task def deploy(): try: target_host = env.hosts[0] except IndexError:", "and not print anything to terminal, if false then print output to terminal", "\"color\": \"danger\", \"pretext\": 'Detail: {}'.format(result), \"mrkdwn_in\": [\"text\", \"pretext\"] }] sc.send(attachments=attachments, text=\"Deploy to *{}*", "Output string if capture=True or return nothing if capture=false \"\"\" result = ''", "def get_current_commit(): return run_cmd(\"git rev-parse HEAD\") def save_last_commit(): run_cmd(\"git rev-parse HEAD > {}\".format(LAST_CID_FILE))", "text=\"Deploy to *{}* error\".format(env.hosts[0])) raise SystemExit() else: return result def do_deploy(): if not", "not exists(\"{}/{}\".format(HOST_API[env.hosts[0]]['dir'], LAST_CID_FILE)): save_last_commit() run_cmd(\"git pull\") 
run_testing() restart_api() send_commit_applied() save_last_commit() def run_testing(): pass", "local_capture=True): \"\"\" Run cmd base on local or remote host and return output", "to *{}* error\".format(env.hosts[0])) raise SystemExit() else: return result def do_deploy(): if not exists(\"{}/{}\".format(HOST_API[env.hosts[0]]['dir'],", "get_current_commit(): return run_cmd(\"git rev-parse HEAD\") def save_last_commit(): run_cmd(\"git rev-parse HEAD > {}\".format(LAST_CID_FILE)) def", "def save_last_commit(): run_cmd(\"git rev-parse HEAD > {}\".format(LAST_CID_FILE)) def get_last_commit(): return run_cmd(\"cat {}\".format(LAST_CID_FILE)) def", "from fabfile_config import * import traceback from fabric.contrib.files import exists LAST_CID_FILE = \"last_commit_id.txt\"", "on your local machine first. Eg use: `fab set_host:dev deploy` :param: target_host string", "# as_user=True, icon_emoji=\":gear:\", **kargs ) except Exception: traceback.print_exc() sc = FabSlack() @task def", "host and return output or print output to terminal screen :param string cmd:", "print anything to terminal, if false then print output to terminal :return: Output", "false then print output to terminal :return: Output string if capture=True or return", "HEAD\") def save_last_commit(): run_cmd(\"git rev-parse HEAD > {}\".format(LAST_CID_FILE)) def get_last_commit(): return run_cmd(\"cat {}\".format(LAST_CID_FILE))", "* import traceback from fabric.contrib.files import exists LAST_CID_FILE = \"last_commit_id.txt\" class Singleton(type): _instances", "for python3 \"\"\" # -*- coding: utf-8 -*- from __future__ import print_function from", "template for python3 \"\"\" # -*- coding: utf-8 -*- from __future__ import print_function", "target_host == 'local' else \"run\" if fn == 'local': result = local(cmd, local_capture)", "LAST_CID_FILE)): save_last_commit() run_cmd(\"git pull\") run_testing() restart_api() send_commit_applied() save_last_commit() def run_testing(): pass def restart_api():", 
"\"good\", \"title\": \"Commit applied:\", \"text\": commit_applied, }, ] sc.send(attachments=attachments, text=\"Deploy to *{}* success\".format(env.hosts[0]))", "NOTE: plz configure ssh config file on your local machine first. Eg use:", "+ commit_applied commit_applied = commit_applied.replace(\"\\n\", \"\\n••• \") attachments = [ { \"color\": \"good\",", "--oneline --pretty=format:'%s'\".format(last_commit_id, current_commit_id)) def send_commit_applied(): last_commit_id = get_last_commit() current_commit_id = get_current_commit() commit_applied =", "run_testing() restart_api() send_commit_applied() save_last_commit() def run_testing(): pass def restart_api(): pass def get_current_commit(): return", "the output else: result = run(cmd, warn_only=True, pty=False) if result.failed: print(result.stdout) attachments =", "def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args,", "\"danger\", \"pretext\": 'Detail: {}'.format(result), \"mrkdwn_in\": [\"text\", \"pretext\"] }] sc.send(attachments=attachments, text=\"Deploy to *{}* error\".format(env.hosts[0]))", "== 'local': result = local(cmd, local_capture) # Do not print to terminal and", "to terminal, if false then print output to terminal :return: Output string if", "= SlackClient(SLACK_API_KEY) def send(self, **kargs): try: self.sc.api_call( \"chat.postMessage\", channel=\"#log-info\", username='Deployment', # as_user=True, icon_emoji=\":gear:\",", "SlackClient(SLACK_API_KEY) def send(self, **kargs): try: self.sc.api_call( \"chat.postMessage\", channel=\"#log-info\", username='Deployment', # as_user=True, icon_emoji=\":gear:\", **kargs", "bool local_capture: If true then return output and not print anything to terminal,", "ssh config file on your local machine first. 
Eg use: `fab set_host:dev deploy`", "__future__ import print_function from slackclient import SlackClient from fabric.api import cd, env, task,", "with settings(warn_only=True): fn = \"local\" if target_host == 'local' else \"run\" if fn", "terminal and get the output else: result = run(cmd, warn_only=True, pty=False) if result.failed:", "local_capture) # Do not print to terminal and get the output else: result", "as_user=True, icon_emoji=\":gear:\", **kargs ) except Exception: traceback.print_exc() sc = FabSlack() @task def test(target_host):", "Exception: traceback.print_exc() sc = FabSlack() @task def test(target_host): pass @task def set_host(target_host='dev'): \"\"\"Set", "= local(cmd, local_capture) # Do not print to terminal and get the output", "and get the output else: result = run(cmd, warn_only=True, pty=False) if result.failed: print(result.stdout)", "nothing if capture=false \"\"\" result = '' with settings(warn_only=True): fn = \"local\" if", "output and not print anything to terminal, if false then print output to", "\"mrkdwn_in\": [\"text\", \"pretext\"] }] sc.send(attachments=attachments, text=\"Deploy to *{}* error\".format(env.hosts[0])) raise SystemExit() else: return", "output or print output to terminal screen :param string cmd: Command to run", "Run cmd base on local or remote host and return output or print", "= super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] class FabSlack(metaclass=Singleton): sc = SlackClient(SLACK_API_KEY) def send(self,", "{}...{} --oneline --pretty=format:'%s'\".format(last_commit_id, current_commit_id)) def send_commit_applied(): last_commit_id = get_last_commit() current_commit_id = get_current_commit() commit_applied", "def do_deploy(): if not exists(\"{}/{}\".format(HOST_API[env.hosts[0]]['dir'], LAST_CID_FILE)): save_last_commit() run_cmd(\"git pull\") run_testing() restart_api() send_commit_applied() save_last_commit()", "`fab set_host:dev deploy` :param: target_host string \"\"\" 
env.use_ssh_config = True env.hosts = [target_host]", "\"\"\" result = '' with settings(warn_only=True): fn = \"local\" if target_host == 'local'", "def restart_api(): pass def get_current_commit(): return run_cmd(\"git rev-parse HEAD\") def save_last_commit(): run_cmd(\"git rev-parse", "current_commit_id): return run_cmd(\"git log {}...{} --oneline --pretty=format:'%s'\".format(last_commit_id, current_commit_id)) def send_commit_applied(): last_commit_id = get_last_commit()", "not print to terminal and get the output else: result = run(cmd, warn_only=True,", "utf-8 -*- from __future__ import print_function from slackclient import SlackClient from fabric.api import", ") except Exception: traceback.print_exc() sc = FabSlack() @task def test(target_host): pass @task def", "get_last_commit(): return run_cmd(\"cat {}\".format(LAST_CID_FILE)) def get_git_logs(last_commit_id, current_commit_id): return run_cmd(\"git log {}...{} --oneline --pretty=format:'%s'\".format(last_commit_id,", "{}'.format(result.command), \"color\": \"danger\", \"pretext\": 'Detail: {}'.format(result), \"mrkdwn_in\": [\"text\", \"pretext\"] }] sc.send(attachments=attachments, text=\"Deploy to", "to run :param string target_host: local or remote host name :param bool local_capture:", "env, task, run, settings, local from fabfile_config import * import traceback from fabric.contrib.files", "and return output or print output to terminal screen :param string cmd: Command", "from __future__ import print_function from slackclient import SlackClient from fabric.api import cd, env,", "anything to terminal, if false then print output to terminal :return: Output string", "get the output else: result = run(cmd, warn_only=True, pty=False) if result.failed: print(result.stdout) attachments", "get_current_commit() commit_applied = get_git_logs(last_commit_id, current_commit_id) if commit_applied: commit_applied = \"••• \" + commit_applied", "pass def restart_api(): pass def get_current_commit(): return 
run_cmd(\"git rev-parse HEAD\") def save_last_commit(): run_cmd(\"git", "result = local(cmd, local_capture) # Do not print to terminal and get the", "[target_host] @task def deploy(): try: target_host = env.hosts[0] except IndexError: target_host = 'dev'", "\"\"\" Run cmd base on local or remote host and return output or", "import exists LAST_CID_FILE = \"last_commit_id.txt\" class Singleton(type): _instances = {} def __call__(cls, *args,", "run_cmd(\"git rev-parse HEAD\") def save_last_commit(): run_cmd(\"git rev-parse HEAD > {}\".format(LAST_CID_FILE)) def get_last_commit(): return", "then print output to terminal :return: Output string if capture=True or return nothing", "pull\") run_testing() restart_api() send_commit_applied() save_last_commit() def run_testing(): pass def restart_api(): pass def get_current_commit():", "pass def get_current_commit(): return run_cmd(\"git rev-parse HEAD\") def save_last_commit(): run_cmd(\"git rev-parse HEAD >", "fabric.contrib.files import exists LAST_CID_FILE = \"last_commit_id.txt\" class Singleton(type): _instances = {} def __call__(cls,", "sc = FabSlack() @task def test(target_host): pass @task def set_host(target_host='dev'): \"\"\"Set host before", "or return nothing if capture=false \"\"\" result = '' with settings(warn_only=True): fn =", "= \"local\" if target_host == 'local' else \"run\" if fn == 'local': result", "{} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(Singleton,", "save_last_commit() run_cmd(\"git pull\") run_testing() restart_api() send_commit_applied() save_last_commit() def run_testing(): pass def restart_api(): pass", "if result.failed: print(result.stdout) attachments = [{ \"title\": 'Command: {}'.format(result.command), \"color\": \"danger\", \"pretext\": 'Detail:", "local machine first. 
Eg use: `fab set_host:dev deploy` :param: target_host string \"\"\" env.use_ssh_config", "# Do not print to terminal and get the output else: result =", "last_commit_id = get_last_commit() current_commit_id = get_current_commit() commit_applied = get_git_logs(last_commit_id, current_commit_id) if commit_applied: commit_applied", "import SlackClient from fabric.api import cd, env, task, run, settings, local from fabfile_config", "local or remote host name :param bool local_capture: If true then return output", "to terminal screen :param string cmd: Command to run :param string target_host: local", "string \"\"\" env.use_ssh_config = True env.hosts = [target_host] @task def deploy(): try: target_host", ":param string target_host: local or remote host name :param bool local_capture: If true", "commit_applied commit_applied = commit_applied.replace(\"\\n\", \"\\n••• \") attachments = [ { \"color\": \"good\", \"title\":", "super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] class FabSlack(metaclass=Singleton): sc = SlackClient(SLACK_API_KEY) def send(self, **kargs):", "capture=True or return nothing if capture=false \"\"\" result = '' with settings(warn_only=True): fn", "remote host and return output or print output to terminal screen :param string", "\"pretext\": 'Detail: {}'.format(result), \"mrkdwn_in\": [\"text\", \"pretext\"] }] sc.send(attachments=attachments, text=\"Deploy to *{}* error\".format(env.hosts[0])) raise", "def get_last_commit(): return run_cmd(\"cat {}\".format(LAST_CID_FILE)) def get_git_logs(last_commit_id, current_commit_id): return run_cmd(\"git log {}...{} --oneline", "= [ { \"color\": \"good\", \"title\": \"Commit applied:\", \"text\": commit_applied, }, ] sc.send(attachments=attachments,", "{}'.format(result), \"mrkdwn_in\": [\"text\", \"pretext\"] }] sc.send(attachments=attachments, text=\"Deploy to *{}* error\".format(env.hosts[0])) raise SystemExit() else:", "if cls not in cls._instances: cls._instances[cls] = 
super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] class", "target_host: local or remote host name :param bool local_capture: If true then return", "print output to terminal screen :param string cmd: Command to run :param string", "def test(target_host): pass @task def set_host(target_host='dev'): \"\"\"Set host before deploy, NOTE: plz configure", "return run_cmd(\"cat {}\".format(LAST_CID_FILE)) def get_git_logs(last_commit_id, current_commit_id): return run_cmd(\"git log {}...{} --oneline --pretty=format:'%s'\".format(last_commit_id, current_commit_id))", "rev-parse HEAD\") def save_last_commit(): run_cmd(\"git rev-parse HEAD > {}\".format(LAST_CID_FILE)) def get_last_commit(): return run_cmd(\"cat", "task, run, settings, local from fabfile_config import * import traceback from fabric.contrib.files import", "import traceback from fabric.contrib.files import exists LAST_CID_FILE = \"last_commit_id.txt\" class Singleton(type): _instances =", "== 'local' else \"run\" if fn == 'local': result = local(cmd, local_capture) #", "def run_cmd(cmd, target_host=None, local_capture=True): \"\"\" Run cmd base on local or remote host", "run_cmd(cmd, target_host=None, local_capture=True): \"\"\" Run cmd base on local or remote host and", "target_host string \"\"\" env.use_ssh_config = True env.hosts = [target_host] @task def deploy(): try:", "traceback from fabric.contrib.files import exists LAST_CID_FILE = \"last_commit_id.txt\" class Singleton(type): _instances = {}", "deploy, NOTE: plz configure ssh config file on your local machine first. 
Eg", "\"\\n••• \") attachments = [ { \"color\": \"good\", \"title\": \"Commit applied:\", \"text\": commit_applied,", "log {}...{} --oneline --pretty=format:'%s'\".format(last_commit_id, current_commit_id)) def send_commit_applied(): last_commit_id = get_last_commit() current_commit_id = get_current_commit()", "import * import traceback from fabric.contrib.files import exists LAST_CID_FILE = \"last_commit_id.txt\" class Singleton(type):", "'local' else \"run\" if fn == 'local': result = local(cmd, local_capture) # Do", "import cd, env, task, run, settings, local from fabfile_config import * import traceback", "print_function from slackclient import SlackClient from fabric.api import cd, env, task, run, settings,", "host before deploy, NOTE: plz configure ssh config file on your local machine", "cmd base on local or remote host and return output or print output", "Singleton(type): _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances:", "cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] class FabSlack(metaclass=Singleton): sc = SlackClient(SLACK_API_KEY) def", "exists(\"{}/{}\".format(HOST_API[env.hosts[0]]['dir'], LAST_CID_FILE)): save_last_commit() run_cmd(\"git pull\") run_testing() restart_api() send_commit_applied() save_last_commit() def run_testing(): pass def", "_instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls]", "Command to run :param string target_host: local or remote host name :param bool", "else: return result def do_deploy(): if not exists(\"{}/{}\".format(HOST_API[env.hosts[0]]['dir'], LAST_CID_FILE)): save_last_commit() run_cmd(\"git pull\") run_testing()", "save_last_commit() def run_testing(): pass def restart_api(): pass def get_current_commit(): return run_cmd(\"git rev-parse HEAD\")", "try: self.sc.api_call( \"chat.postMessage\", channel=\"#log-info\", username='Deployment', # as_user=True, icon_emoji=\":gear:\", **kargs 
) except Exception: traceback.print_exc()", "current_commit_id)) def send_commit_applied(): last_commit_id = get_last_commit() current_commit_id = get_current_commit() commit_applied = get_git_logs(last_commit_id, current_commit_id)", "sc = SlackClient(SLACK_API_KEY) def send(self, **kargs): try: self.sc.api_call( \"chat.postMessage\", channel=\"#log-info\", username='Deployment', # as_user=True,", "= 'dev' with cd(HOST_API[target_host]['dir']): do_deploy() def run_cmd(cmd, target_host=None, local_capture=True): \"\"\" Run cmd base", "settings(warn_only=True): fn = \"local\" if target_host == 'local' else \"run\" if fn ==", "target_host = env.hosts[0] except IndexError: target_host = 'dev' with cd(HOST_API[target_host]['dir']): do_deploy() def run_cmd(cmd,", "target_host=None, local_capture=True): \"\"\" Run cmd base on local or remote host and return", "local(cmd, local_capture) # Do not print to terminal and get the output else:", "get_last_commit() current_commit_id = get_current_commit() commit_applied = get_git_logs(last_commit_id, current_commit_id) if commit_applied: commit_applied = \"•••", "= [target_host] @task def deploy(): try: target_host = env.hosts[0] except IndexError: target_host =", "\"\"\" # -*- coding: utf-8 -*- from __future__ import print_function from slackclient import", ":param string cmd: Command to run :param string target_host: local or remote host", "machine first. 
Eg use: `fab set_host:dev deploy` :param: target_host string \"\"\" env.use_ssh_config =", "result = '' with settings(warn_only=True): fn = \"local\" if target_host == 'local' else", "return run_cmd(\"git rev-parse HEAD\") def save_last_commit(): run_cmd(\"git rev-parse HEAD > {}\".format(LAST_CID_FILE)) def get_last_commit():", "# -*- coding: utf-8 -*- from __future__ import print_function from slackclient import SlackClient", "**kargs): try: self.sc.api_call( \"chat.postMessage\", channel=\"#log-info\", username='Deployment', # as_user=True, icon_emoji=\":gear:\", **kargs ) except Exception:", "run_cmd(\"git rev-parse HEAD > {}\".format(LAST_CID_FILE)) def get_last_commit(): return run_cmd(\"cat {}\".format(LAST_CID_FILE)) def get_git_logs(last_commit_id, current_commit_id):", "commit_applied = \"••• \" + commit_applied commit_applied = commit_applied.replace(\"\\n\", \"\\n••• \") attachments =", "cls._instances[cls] class FabSlack(metaclass=Singleton): sc = SlackClient(SLACK_API_KEY) def send(self, **kargs): try: self.sc.api_call( \"chat.postMessage\", channel=\"#log-info\",", "or print output to terminal screen :param string cmd: Command to run :param", "capture=false \"\"\" result = '' with settings(warn_only=True): fn = \"local\" if target_host ==", ":param bool local_capture: If true then return output and not print anything to", "sc.send(attachments=attachments, text=\"Deploy to *{}* error\".format(env.hosts[0])) raise SystemExit() else: return result def do_deploy(): if", "first. 
Eg use: `fab set_host:dev deploy` :param: target_host string \"\"\" env.use_ssh_config = True", "pty=False) if result.failed: print(result.stdout) attachments = [{ \"title\": 'Command: {}'.format(result.command), \"color\": \"danger\", \"pretext\":", "commit_applied = get_git_logs(last_commit_id, current_commit_id) if commit_applied: commit_applied = \"••• \" + commit_applied commit_applied", "with cd(HOST_API[target_host]['dir']): do_deploy() def run_cmd(cmd, target_host=None, local_capture=True): \"\"\" Run cmd base on local", "= \"••• \" + commit_applied commit_applied = commit_applied.replace(\"\\n\", \"\\n••• \") attachments = [", "@task def set_host(target_host='dev'): \"\"\"Set host before deploy, NOTE: plz configure ssh config file", "rev-parse HEAD > {}\".format(LAST_CID_FILE)) def get_last_commit(): return run_cmd(\"cat {}\".format(LAST_CID_FILE)) def get_git_logs(last_commit_id, current_commit_id): return", "not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] class FabSlack(metaclass=Singleton): sc", "not print anything to terminal, if false then print output to terminal :return:", "= get_git_logs(last_commit_id, current_commit_id) if commit_applied: commit_applied = \"••• \" + commit_applied commit_applied =", "if target_host == 'local' else \"run\" if fn == 'local': result = local(cmd,", "class FabSlack(metaclass=Singleton): sc = SlackClient(SLACK_API_KEY) def send(self, **kargs): try: self.sc.api_call( \"chat.postMessage\", channel=\"#log-info\", username='Deployment',", "string if capture=True or return nothing if capture=false \"\"\" result = '' with", "commit_applied: commit_applied = \"••• \" + commit_applied commit_applied = commit_applied.replace(\"\\n\", \"\\n••• \") attachments", "cmd: Command to run :param string target_host: local or remote host name :param", "from slackclient import SlackClient from fabric.api import cd, env, task, run, settings, local", 
"print(result.stdout) attachments = [{ \"title\": 'Command: {}'.format(result.command), \"color\": \"danger\", \"pretext\": 'Detail: {}'.format(result), \"mrkdwn_in\":", "result def do_deploy(): if not exists(\"{}/{}\".format(HOST_API[env.hosts[0]]['dir'], LAST_CID_FILE)): save_last_commit() run_cmd(\"git pull\") run_testing() restart_api() send_commit_applied()", "config file on your local machine first. Eg use: `fab set_host:dev deploy` :param:", "result = run(cmd, warn_only=True, pty=False) if result.failed: print(result.stdout) attachments = [{ \"title\": 'Command:", "configure ssh config file on your local machine first. Eg use: `fab set_host:dev", "do_deploy() def run_cmd(cmd, target_host=None, local_capture=True): \"\"\" Run cmd base on local or remote", "= env.hosts[0] except IndexError: target_host = 'dev' with cd(HOST_API[target_host]['dir']): do_deploy() def run_cmd(cmd, target_host=None,", "\"local\" if target_host == 'local' else \"run\" if fn == 'local': result =", "LAST_CID_FILE = \"last_commit_id.txt\" class Singleton(type): _instances = {} def __call__(cls, *args, **kwargs): if", "[ { \"color\": \"good\", \"title\": \"Commit applied:\", \"text\": commit_applied, }, ] sc.send(attachments=attachments, text=\"Deploy", "= get_last_commit() current_commit_id = get_current_commit() commit_applied = get_git_logs(last_commit_id, current_commit_id) if commit_applied: commit_applied =", "run_cmd(\"git pull\") run_testing() restart_api() send_commit_applied() save_last_commit() def run_testing(): pass def restart_api(): pass def", "if not exists(\"{}/{}\".format(HOST_API[env.hosts[0]]['dir'], LAST_CID_FILE)): save_last_commit() run_cmd(\"git pull\") run_testing() restart_api() send_commit_applied() save_last_commit() def run_testing():", "return cls._instances[cls] class FabSlack(metaclass=Singleton): sc = SlackClient(SLACK_API_KEY) def send(self, **kargs): try: self.sc.api_call( \"chat.postMessage\",", "on local or remote host and return output or print 
output to terminal", "use: `fab set_host:dev deploy` :param: target_host string \"\"\" env.use_ssh_config = True env.hosts =", "import print_function from slackclient import SlackClient from fabric.api import cd, env, task, run,", "\") attachments = [ { \"color\": \"good\", \"title\": \"Commit applied:\", \"text\": commit_applied, },", "env.hosts[0] except IndexError: target_host = 'dev' with cd(HOST_API[target_host]['dir']): do_deploy() def run_cmd(cmd, target_host=None, local_capture=True):", "current_commit_id = get_current_commit() commit_applied = get_git_logs(last_commit_id, current_commit_id) if commit_applied: commit_applied = \"••• \"", "self.sc.api_call( \"chat.postMessage\", channel=\"#log-info\", username='Deployment', # as_user=True, icon_emoji=\":gear:\", **kargs ) except Exception: traceback.print_exc() sc", "local or remote host and return output or print output to terminal screen", "\"color\": \"good\", \"title\": \"Commit applied:\", \"text\": commit_applied, }, ] sc.send(attachments=attachments, text=\"Deploy to *{}*", "your local machine first. Eg use: `fab set_host:dev deploy` :param: target_host string \"\"\"", "from fabric.contrib.files import exists LAST_CID_FILE = \"last_commit_id.txt\" class Singleton(type): _instances = {} def", "= commit_applied.replace(\"\\n\", \"\\n••• \") attachments = [ { \"color\": \"good\", \"title\": \"Commit applied:\",", "return run_cmd(\"git log {}...{} --oneline --pretty=format:'%s'\".format(last_commit_id, current_commit_id)) def send_commit_applied(): last_commit_id = get_last_commit() current_commit_id" ]
[ "'app', [App, None], None, ), # 3 (4, TType.STRUCT, 'user', [User, None], None,", "- action - request - context \"\"\" def __init__(self, action=None, request=None, context=None,): self.action", "1 (2, TType.STRING, 'type', 'UTF8', None, ), # 2 (3, TType.MAP, 'config', (TType.STRING,", "if self.url is not None: oprot.writeFieldBegin('url', TType.STRING, 5) oprot.writeString(self.url.encode('utf-8') if sys.version_info[0] == 2", "'UTF8', TType.STRING, 'UTF8', False), None, ), # 2 (3, TType.MAP, 'uriFragments', (TType.STRING, 'UTF8',", "class Message(object): \"\"\" Attributes: - success - message \"\"\" def __init__(self, success=None, message=None,):", "ftype == TType.STRING: self.arguments = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "oprot.writeStructBegin('RpcRequest') if self.arguments is not None: oprot.writeFieldBegin('arguments', TType.STRING, 1) oprot.writeString(self.arguments.encode('utf-8') if sys.version_info[0] ==", "iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.request =", "iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not", "def __ne__(self, other): return not (self == other) class App(object): \"\"\" Attributes: -", "== TType.LIST: self.logs = [] (_etype59, _size56) = iprot.readListBegin() for _i60 in range(_size56):", "NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING", "== 3: if ftype == TType.MAP: self.config = {} (_ktype1, _vtype2, _size0) =", "fid == 2: if ftype == TType.STRING: self.message = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "\"\"\" Attributes: - id - roleId - categoryId - status - name -", "not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Message') if", "self.response = response self.events = events self.logs = logs def 
read(self, iprot): if", "other) class App(object): \"\"\" Attributes: - id - userId - status - name", "fid == 1: if ftype == TType.STRING: self.arguments = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "oprot.writeFieldEnd() if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 4) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] ==", "iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if", "== TType.STOP: break if fid == 1: if ftype == TType.STRING: self.arguments =", "errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val6 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "fid == 3: if ftype == TType.MAP: self.uriFragments = {} (_ktype17, _vtype18, _size16)", "baseUrl self.app = app self.user = user def read(self, iprot): if iprot._fast_decode is", "3: if ftype == TType.STRUCT: self.context = Context() self.context.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype)", "), # 1 (2, TType.STRING, 'data', 'UTF8', None, ), # 2 ) all_structs.append(Log)", "), # 2 (3, TType.STRING, 'body', 'UTF8', None, ), # 3 ) all_structs.append(Event)", "== 2 else iprot.readString() self.config[_key5] = _val6 iprot.readMapEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd()", "range(_size36): _elem41 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.scopes.append(_elem41) iprot.readListEnd()", "2 else iprot.readString() _val22 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", ") all_structs.append(Action) Action.thrift_spec = ( None, # 0 (1, TType.STRING, 'name', 'UTF8', None,", "2 else iprot.readString() self.parameters[_key28] = _val29 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 5:", "from thrift.TRecursive import fix_spec import 
sys from thrift.transport import TTransport all_structs = []", "TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.config)) for kiter7, viter8 in self.config.items(): oprot.writeString(kiter7.encode('utf-8') if sys.version_info[0]", "TType.STRUCT, 2) self.request.write(oprot) oprot.writeFieldEnd() if self.context is not None: oprot.writeFieldBegin('context', TType.STRUCT, 3) self.context.write(oprot)", "fid == 2: if ftype == TType.STRUCT: self.request = Request() self.request.read(iprot) else: iprot.skip(ftype)", "TType.STRUCT: self.response = Response() self.response.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype", "http - rpc \"\"\" def __init__(self, http=None, rpc=None,): self.http = http self.rpc =", "self.headers[_key69] = _val70 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 3: if ftype ==", "== 2: if ftype == TType.MAP: self.headers = {} (_ktype10, _vtype11, _size9) =", "not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Execute') if self.action is not None: oprot.writeFieldBegin('action',", "other.__dict__ def __ne__(self, other): return not (self == other) class Context(object): \"\"\" Attributes:", "else: iprot.skip(ftype) elif fid == 3: if ftype == TType.I64: self.categoryId = iprot.readI64()", "0 (1, TType.STRING, 'name', 'UTF8', None, ), # 1 (2, TType.STRING, 'type', 'UTF8',", "for _i20 in range(_size16): _key21 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "if sys.version_info[0] == 2 else self.url) oprot.writeFieldEnd() if self.appKey is not None: oprot.writeFieldBegin('appKey',", "self.http = http self.rpc = rpc def read(self, iprot): if iprot._fast_decode is not", "if fid == 1: if ftype == TType.STRING: self.arguments = iprot.readString().decode('utf-8', errors='replace') if", "rpc \"\"\" def __init__(self, http=None, rpc=None,): self.http = http self.rpc = rpc def", "2 else self.method) 
oprot.writeFieldEnd() if self.headers is not None: oprot.writeFieldBegin('headers', TType.MAP, 2) oprot.writeMapBegin(TType.STRING,", "None, ), # 2 ) all_structs.append(Connection) Connection.thrift_spec = ( None, # 0 (1,", "if sys.version_info[0] == 2 else iter49) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return", "TType.I32, 3) oprot.writeI32(self.status) oprot.writeFieldEnd() if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 4) oprot.writeString(self.name.encode('utf-8')", "viter8 in self.config.items(): oprot.writeString(kiter7.encode('utf-8') if sys.version_info[0] == 2 else kiter7) oprot.writeString(viter8.encode('utf-8') if sys.version_info[0]", "self.body = body def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans,", "self.parameters.append(_elem47) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode", "# 2 (3, TType.I32, 'status', None, None, ), # 3 (4, TType.STRING, 'name',", "parameters=None,): self.id = id self.userId = userId self.status = status self.name = name", "class Action(object): \"\"\" Attributes: - name - code \"\"\" def __init__(self, name=None, code=None,):", "'routeId', None, None, ), # 1 (2, TType.STRING, 'baseUrl', 'UTF8', None, ), #", "def __init__(self, success=None, message=None,): self.success = success self.message = message def read(self, iprot):", "oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.url is not None:", "errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 2:", "None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Log') if self.level", "__ne__(self, other): return not (self == other) 
class Log(object): \"\"\" Attributes: - level", "else kiter7) oprot.writeString(viter8.encode('utf-8') if sys.version_info[0] == 2 else viter8) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd()", "), # 1 (2, TType.LIST, 'events', (TType.STRUCT, [Event, None], False), None, ), #", "(6, TType.STRING, 'email', 'UTF8', None, ), # 6 (7, TType.I32, 'points', None, None,", "), # 1 (2, TType.STRUCT, 'rpc', [RpcRequest, None], None, ), # 2 )", "None], None, ), # 4 ) all_structs.append(App) App.thrift_spec = ( None, # 0", "not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('App') if", "sys.version_info[0] == 2 else self.body) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self):", "'UTF8', None, ), # 1 (2, TType.STRING, 'message', 'UTF8', None, ), # 2", "iprot.skip(ftype) elif fid == 6: if ftype == TType.STRING: self.appKey = iprot.readString().decode('utf-8', errors='replace')", "if fid == 1: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8', errors='replace') if", "if fid == 1: if ftype == TType.STRING: self.method = iprot.readString().decode('utf-8', errors='replace') if", "self.data is not None: oprot.writeFieldBegin('data', TType.STRING, 2) oprot.writeString(self.data.encode('utf-8') if sys.version_info[0] == 2 else", "# 1 (2, TType.I64, 'userId', None, None, ), # 2 (3, TType.I32, 'status',", "__ne__(self, other): return not (self == other) class App(object): \"\"\" Attributes: - id", "else kiter32) oprot.writeString(viter33.encode('utf-8') if sys.version_info[0] == 2 else viter33) oprot.writeMapEnd() oprot.writeFieldEnd() if self.parameters", "2 else iprot.readString() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if", "return not (self == other) class Request(object): \"\"\" 
Attributes: - http - rpc", "'UTF8', None, ), # 5 (6, TType.STRING, 'email', 'UTF8', None, ), # 6", "== TType.STRUCT: self.rpc = RpcRequest() self.rpc.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def", "2 (3, TType.MAP, 'config', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3", "0 (1, TType.STRING, 'action', 'UTF8', None, ), # 1 (2, TType.STRUCT, 'request', [Request,", "TType.MAP, 'headers', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 2 (3, TType.STRING,", "== 3: if ftype == TType.MAP: self.uriFragments = {} (_ktype17, _vtype18, _size16) =", "oprot.writeFieldEnd() if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 5) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] ==", "if sys.version_info[0] == 2 else kiter32) oprot.writeString(viter33.encode('utf-8') if sys.version_info[0] == 2 else viter33)", "id - userId - status - name - url - appKey - scopes", "(1, TType.STRING, 'method', 'UTF8', None, ), # 1 (2, TType.MAP, 'headers', (TType.STRING, 'UTF8',", "# 2 ) all_structs.append(Execute) Execute.thrift_spec = ( None, # 0 (1, TType.STRING, 'action',", "== 2: if ftype == TType.STRING: self.message = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "(self == other) class Response(object): \"\"\" Attributes: - statusCode - headers - body", "if fid == 1: if ftype == TType.I64: self.routeId = iprot.readI64() else: iprot.skip(ftype)", "# 2 (3, TType.LIST, 'logs', (TType.STRUCT, [Log, None], False), None, ), # 3", "if sys.version_info[0] == 2 else viter35) oprot.writeMapEnd() oprot.writeFieldEnd() if self.body is not None:", "if self.appKey is not None: oprot.writeFieldBegin('appKey', TType.STRING, 6) oprot.writeString(self.appKey.encode('utf-8') if sys.version_info[0] == 2", "= ( None, # 0 (1, TType.STRING, 'method', 'UTF8', None, ), # 1", "if sys.version_info[0] == 2 else self.level) oprot.writeFieldEnd() if 
self.message is not None: oprot.writeFieldBegin('message',", "status=None, name=None, url=None, appKey=None, scopes=None, parameters=None,): self.id = id self.userId = userId self.status", "oprot.writeStructBegin('HttpRequest') if self.method is not None: oprot.writeFieldBegin('method', TType.STRING, 1) oprot.writeString(self.method.encode('utf-8') if sys.version_info[0] ==", "None, ), # 2 (3, TType.I32, 'status', None, None, ), # 3 (4,", "else: iprot.skip(ftype) elif fid == 3: if ftype == TType.I32: self.status = iprot.readI32()", "== TType.STRING: self.action = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else:", "return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP:", "ftype == TType.I32: self.status = iprot.readI32() else: iprot.skip(ftype) elif fid == 5: if", "= iprot.readI64() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.I32: self.status", "py # from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException from thrift.protocol.TProtocol import", "oprot.writeI64(self.roleId) oprot.writeFieldEnd() if self.categoryId is not None: oprot.writeFieldBegin('categoryId', TType.I64, 3) oprot.writeI64(self.categoryId) oprot.writeFieldEnd() if", "in self.parameters.items(): oprot.writeString(kiter34.encode('utf-8') if sys.version_info[0] == 2 else kiter34) oprot.writeString(viter35.encode('utf-8') if sys.version_info[0] ==", "7: if ftype == TType.I32: self.points = iprot.readI32() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd()", "__ne__(self, other): return not (self == other) class Response(object): \"\"\" Attributes: - statusCode", "not None: oprot.writeFieldBegin('name', TType.STRING, 4) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd()", "if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.LIST, 8) 
oprot.writeListBegin(TType.STRING, len(self.parameters)) for iter49 in", "sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.url is not None: oprot.writeFieldBegin('url', TType.STRING,", "'UTF8', TType.STRING, 'UTF8', False), None, ), # 4 (5, TType.STRING, 'body', 'UTF8', None,", "oprot.writeFieldEnd() if self.headers is not None: oprot.writeFieldBegin('headers', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers)) for", "range(_size50): _elem55 = Event() _elem55.read(iprot) self.events.append(_elem55) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3:", "), # 4 (5, TType.STRING, 'name', 'UTF8', None, ), # 5 (6, TType.STRING,", "== 3: if ftype == TType.STRUCT: self.context = Context() self.context.read(iprot) else: iprot.skip(ftype) else:", "- events - logs \"\"\" def __init__(self, response=None, events=None, logs=None,): self.response = response", "sys.version_info[0] == 2 else viter72) oprot.writeMapEnd() oprot.writeFieldEnd() if self.body is not None: oprot.writeFieldBegin('body',", "else iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.message", "if ftype == TType.BOOL: self.success = iprot.readBool() else: iprot.skip(ftype) elif fid == 2:", "if self.rpc is not None: oprot.writeFieldBegin('rpc', TType.STRUCT, 2) self.rpc.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def", "oprot.writeI64(self.id) oprot.writeFieldEnd() if self.userId is not None: oprot.writeFieldBegin('userId', TType.I64, 2) oprot.writeI64(self.userId) oprot.writeFieldEnd() if", "self.headers = headers self.body = body def read(self, iprot): if iprot._fast_decode is not", "sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.type is not None: oprot.writeFieldBegin('type', TType.STRING,", "oprot.writeFieldBegin('request', TType.STRUCT, 2) self.request.write(oprot) oprot.writeFieldEnd() if self.context is not None: oprot.writeFieldBegin('context', 
TType.STRUCT, 3)", "TType.STRUCT: self.rpc = RpcRequest() self.rpc.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self,", "TType.STRING: self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype)", "id=None, roleId=None, categoryId=None, status=None, name=None, email=None, points=None,): self.id = id self.roleId = roleId", "is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Action')", "fid == 1: if ftype == TType.I64: self.routeId = iprot.readI64() else: iprot.skip(ftype) elif", "2 else self.action) oprot.writeFieldEnd() if self.request is not None: oprot.writeFieldBegin('request', TType.STRUCT, 2) self.request.write(oprot)", "None, ), # 3 ) all_structs.append(Event) Event.thrift_spec = ( None, # 0 (1,", "fid == 1: if ftype == TType.STRUCT: self.http = HttpRequest() self.http.read(iprot) else: iprot.skip(ftype)", "(6, TType.STRING, 'appKey', 'UTF8', None, ), # 6 (7, TType.LIST, 'scopes', (TType.STRING, 'UTF8',", "2 else iprot.readString() self.uriFragments[_key21] = _val22 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 4:", "), # 1 (2, TType.STRING, 'type', 'UTF8', None, ), # 2 (3, TType.MAP,", "iprot.readListBegin() for _i54 in range(_size50): _elem55 = Event() _elem55.read(iprot) self.events.append(_elem55) iprot.readListEnd() else: iprot.skip(ftype)", "context def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and", "self.rpc.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' %", "statusCode self.headers = headers self.body = body def read(self, iprot): if iprot._fast_decode is", "errors='replace') if sys.version_info[0] == 2 else iprot.readString() 
else: iprot.skip(ftype) elif fid == 3:", "status=None, name=None, email=None, points=None,): self.id = id self.roleId = roleId self.categoryId = categoryId", "Message(object): \"\"\" Attributes: - success - message \"\"\" def __init__(self, success=None, message=None,): self.success", "iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.message = iprot.readString().decode('utf-8', errors='replace')", "else viter33) oprot.writeMapEnd() oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 4) oprot.writeMapBegin(TType.STRING,", "( None, # 0 (1, TType.BOOL, 'success', None, None, ), # 1 (2,", "== 2 else self.name) oprot.writeFieldEnd() if self.code is not None: oprot.writeFieldBegin('code', TType.STRING, 2)", "[self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Message') if self.success is not None: oprot.writeFieldBegin('success', TType.BOOL, 1) oprot.writeBool(self.success)", "# 3 ) all_structs.append(Request) Request.thrift_spec = ( None, # 0 (1, TType.STRUCT, 'http',", "validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key, value", "self.status is not None: oprot.writeFieldBegin('status', TType.I32, 4) oprot.writeI32(self.status) oprot.writeFieldEnd() if self.name is not", "if self.points is not None: oprot.writeFieldBegin('points', TType.I32, 7) oprot.writeI32(self.points) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def", "== 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 5: if ftype ==", "oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] == 2 else self.body) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return", "'UTF8', False), None, ), # 2 (3, TType.STRING, 'body', 'UTF8', None, ), #", "== TType.I32: self.status = iprot.readI32() else: iprot.skip(ftype) elif fid == 5: if ftype", "TType.I32: self.status = iprot.readI32() else: iprot.skip(ftype) elif fid == 4: if 
ftype ==", "iter62.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.logs is not None: oprot.writeFieldBegin('logs', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.logs))", "elif fid == 2: if ftype == TType.MAP: self.headers = {} (_ktype65, _vtype66,", "else iprot.readString() self.scopes.append(_elem41) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 8: if ftype ==", "if self.response is not None: oprot.writeFieldBegin('response', TType.STRUCT, 1) self.response.write(oprot) oprot.writeFieldEnd() if self.events is", "Attributes: - response - events - logs \"\"\" def __init__(self, response=None, events=None, logs=None,):", "None, ), # 3 (4, TType.STRUCT, 'user', [User, None], None, ), # 4", "for kiter30, viter31 in self.headers.items(): oprot.writeString(kiter30.encode('utf-8') if sys.version_info[0] == 2 else kiter30) oprot.writeString(viter31.encode('utf-8')", "self.rpc = RpcRequest() self.rpc.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot):", "SURE THAT YOU KNOW WHAT YOU ARE DOING # # options string: py", "4) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) for kiter34, viter35 in self.parameters.items(): oprot.writeString(kiter34.encode('utf-8') if sys.version_info[0] ==", "is not None: oprot.writeFieldBegin('message', TType.STRING, 2) oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message)", "all_structs.append(Response) Response.thrift_spec = ( None, # 0 (1, TType.I32, 'statusCode', None, None, ),", "userId=None, status=None, name=None, url=None, appKey=None, scopes=None, parameters=None,): self.id = id self.userId = userId", "None: oprot.writeFieldBegin('roleId', TType.I64, 2) oprot.writeI64(self.roleId) oprot.writeFieldEnd() if self.categoryId is not None: oprot.writeFieldBegin('categoryId', TType.I64,", "None, # 0 (1, TType.STRING, 'action', 'UTF8', None, ), # 1 (2, TType.STRUCT,", 
"self.email) oprot.writeFieldEnd() if self.points is not None: oprot.writeFieldBegin('points', TType.I32, 7) oprot.writeI32(self.points) oprot.writeFieldEnd() oprot.writeFieldStop()", "Attributes: - level - message \"\"\" def __init__(self, level=None, message=None,): self.level = level", "'request', [Request, None], None, ), # 2 (3, TType.STRUCT, 'context', [Context, None], None,", "appKey self.scopes = scopes self.parameters = parameters def read(self, iprot): if iprot._fast_decode is", "Attributes: - action - request - context \"\"\" def __init__(self, action=None, request=None, context=None,):", "# options string: py # from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException", "self.http is not None: oprot.writeFieldBegin('http', TType.STRUCT, 1) self.http.write(oprot) oprot.writeFieldEnd() if self.rpc is not", "ftype == TType.STRING: self.type = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "ftype == TType.STRING: self.eventName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "message=None,): self.level = level self.message = message def read(self, iprot): if iprot._fast_decode is", "_size9) = iprot.readMapBegin() for _i13 in range(_size9): _key14 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "\"\"\" def __init__(self, id=None, roleId=None, categoryId=None, status=None, name=None, email=None, points=None,): self.id = id", "oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.type is not None:", "self.roleId is not None: oprot.writeFieldBegin('roleId', TType.I64, 2) oprot.writeI64(self.roleId) oprot.writeFieldEnd() if self.categoryId is not", "else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRUCT: self.app = App()", "not None: oprot.writeFieldBegin('roleId', TType.I64, 2) 
oprot.writeI64(self.roleId) oprot.writeFieldEnd() if self.categoryId is not None: oprot.writeFieldBegin('categoryId',", "2 else viter8) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L", "for _i40 in range(_size36): _elem41 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "oprot.writeFieldBegin('events', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.events)) for iter62 in self.events: iter62.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if", "fid == 5: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "_size16) = iprot.readMapBegin() for _i20 in range(_size16): _key21 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "= Response() self.response.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST:", "oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers)) for kiter71, viter72 in self.headers.items(): oprot.writeString(kiter71.encode('utf-8') if sys.version_info[0] == 2", "(7, TType.LIST, 'scopes', (TType.STRING, 'UTF8', False), None, ), # 7 (8, TType.LIST, 'parameters',", "TType.STRING, 'eventName', 'UTF8', None, ), # 1 (2, TType.STRING, 'data', 'UTF8', None, ),", "self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Connection') if self.name is not", "else: iprot.skip(ftype) elif fid == 4: if ftype == TType.MAP: self.parameters = {}", "iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.headers[_key69] = _val70 iprot.readMapEnd() else:", "fid == 1: if ftype == TType.I32: self.statusCode = iprot.readI32() else: iprot.skip(ftype) elif", "oprot.writeString(viter8.encode('utf-8') if sys.version_info[0] == 2 else viter8) oprot.writeMapEnd() 
oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self):", "options string: py # from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException from", "is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Request')", "data \"\"\" def __init__(self, eventName=None, data=None,): self.eventName = eventName self.data = data def", "self.name = name self.type = type self.config = config def read(self, iprot): if", "else: iprot.skip(ftype) elif fid == 7: if ftype == TType.I32: self.points = iprot.readI32()", "TType.STRING, 'body', 'UTF8', None, ), # 3 ) all_structs.append(Event) Event.thrift_spec = ( None,", "kiter7) oprot.writeString(viter8.encode('utf-8') if sys.version_info[0] == 2 else viter8) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def", "None: oprot.writeFieldBegin('email', TType.STRING, 6) oprot.writeString(self.email.encode('utf-8') if sys.version_info[0] == 2 else self.email) oprot.writeFieldEnd() if", ") all_structs.append(HttpRequest) HttpRequest.thrift_spec = ( None, # 0 (1, TType.STRING, 'method', 'UTF8', None,", "== 2 else self.name) oprot.writeFieldEnd() if self.email is not None: oprot.writeFieldBegin('email', TType.STRING, 6)", "== other.__dict__ def __ne__(self, other): return not (self == other) class User(object): \"\"\"", "\"\"\" Attributes: - arguments \"\"\" def __init__(self, arguments=None,): self.arguments = arguments def read(self,", "TType.STRING, 'arguments', 'UTF8', None, ), # 1 ) all_structs.append(Context) Context.thrift_spec = ( None,", "if sys.version_info[0] == 2 else self.appKey) oprot.writeFieldEnd() if self.scopes is not None: oprot.writeFieldBegin('scopes',", "self.thrift_spec])) return oprot.writeStructBegin('User') if self.id is not None: oprot.writeFieldBegin('id', TType.I64, 1) oprot.writeI64(self.id) 
oprot.writeFieldEnd()", "if ftype == TType.I64: self.userId = iprot.readI64() else: iprot.skip(ftype) elif fid == 3:", "self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('RpcRequest') if self.arguments is not", "errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val70 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('HttpRequest') if self.method", "= name self.type = type self.config = config def read(self, iprot): if iprot._fast_decode", "== 2 else self.appKey) oprot.writeFieldEnd() if self.scopes is not None: oprot.writeFieldBegin('scopes', TType.LIST, 7)", "break if fid == 1: if ftype == TType.STRING: self.method = iprot.readString().decode('utf-8', errors='replace')", "oprot.writeString(kiter7.encode('utf-8') if sys.version_info[0] == 2 else kiter7) oprot.writeString(viter8.encode('utf-8') if sys.version_info[0] == 2 else", "else: iprot.skip(ftype) elif fid == 2: if ftype == TType.I64: self.roleId = iprot.readI64()", "and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Message') if self.success is", "if self.app is not None: oprot.writeFieldBegin('app', TType.STRUCT, 3) self.app.write(oprot) oprot.writeFieldEnd() if self.user is", "self.action is not None: oprot.writeFieldBegin('action', TType.STRING, 1) oprot.writeString(self.action.encode('utf-8') if sys.version_info[0] == 2 else", "== 2 else kiter30) oprot.writeString(viter31.encode('utf-8') if sys.version_info[0] == 2 else viter31) oprot.writeMapEnd() oprot.writeFieldEnd()", "'arguments', 'UTF8', None, ), # 1 ) all_structs.append(Context) Context.thrift_spec = ( None, #", "events self.logs = logs def read(self, iprot): if 
iprot._fast_decode is not None and", "None, None, ), # 1 (2, TType.I64, 'roleId', None, None, ), # 2", "DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE", "if sys.version_info[0] == 2 else self.action) oprot.writeFieldEnd() if self.request is not None: oprot.writeFieldBegin('request',", "iprot.readI32() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: self.name =", "if ftype == TType.STRUCT: self.rpc = RpcRequest() self.rpc.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd()", "1: if ftype == TType.I32: self.statusCode = iprot.readI32() else: iprot.skip(ftype) elif fid ==", "self.statusCode = statusCode self.headers = headers self.body = body def read(self, iprot): if", "\"\"\" def __init__(self, eventName=None, data=None,): self.eventName = eventName self.data = data def read(self,", "['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__,", "_size50) = iprot.readListBegin() for _i54 in range(_size50): _elem55 = Event() _elem55.read(iprot) self.events.append(_elem55) iprot.readListEnd()", "None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Response') if self.statusCode is not None: oprot.writeFieldBegin('statusCode', TType.I32,", "iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin()", "other) class Log(object): \"\"\" Attributes: - level - message \"\"\" def __init__(self, level=None,", "4 (5, TType.STRING, 'url', 'UTF8', None, ), # 5 (6, TType.STRING, 'appKey', 'UTF8',", "self.level) oprot.writeFieldEnd() if self.message is not None: oprot.writeFieldBegin('message', TType.STRING, 2) oprot.writeString(self.message.encode('utf-8') if sys.version_info[0]", "1: if ftype == TType.STRING: self.method = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "None, ), # 2 
) all_structs.append(HttpRequest) HttpRequest.thrift_spec = ( None, # 0 (1,", "None, ), # 1 (2, TType.I64, 'userId', None, None, ), # 2 (3,", "'UTF8', None, ), # 1 (2, TType.STRING, 'type', 'UTF8', None, ), # 2", "1) self.http.write(oprot) oprot.writeFieldEnd() if self.rpc is not None: oprot.writeFieldBegin('rpc', TType.STRUCT, 2) self.rpc.write(oprot) oprot.writeFieldEnd()", "else self.arguments) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r'", "is not None: oprot.writeFieldBegin('email', TType.STRING, 6) oprot.writeString(self.email.encode('utf-8') if sys.version_info[0] == 2 else self.email)", "None: oprot.writeFieldBegin('name', TType.STRING, 5) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if", "iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype ==", "TType.MAP: self.config = {} (_ktype1, _vtype2, _size0) = iprot.readMapBegin() for _i4 in range(_size0):", "== 4: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "2 else self.email) oprot.writeFieldEnd() if self.points is not None: oprot.writeFieldBegin('points', TType.I32, 7) oprot.writeI32(self.points)", "oprot.writeString(self.data.encode('utf-8') if sys.version_info[0] == 2 else self.data) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return", "1: if ftype == TType.I64: self.routeId = iprot.readI64() else: iprot.skip(ftype) elif fid ==", "True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid", "oprot.writeFieldBegin('rpc', TType.STRUCT, 2) self.rpc.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L", "- app - user \"\"\" def __init__(self, routeId=None, baseUrl=None, app=None, user=None,): self.routeId =", "is not None: 
oprot.writeFieldBegin('response', TType.STRUCT, 1) self.response.write(oprot) oprot.writeFieldEnd() if self.events is not None:", "not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Connection') if self.name is not None: oprot.writeFieldBegin('name',", "Request() self.request.read(iprot) else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRUCT: self.context", "sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 5: if ftype", "def __ne__(self, other): return not (self == other) class Response(object): \"\"\" Attributes: -", "- body \"\"\" def __init__(self, statusCode=None, headers=None, body=None,): self.statusCode = statusCode self.headers =", "== TType.STOP: break if fid == 1: if ftype == TType.I32: self.statusCode =", "== 2 else self.data) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L", "_size64) = iprot.readMapBegin() for _i68 in range(_size64): _key69 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "None, ), # 2 (3, TType.MAP, 'config', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None,", "= ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' %", "self.routeId = routeId self.baseUrl = baseUrl self.app = app self.user = user def", "1: if ftype == TType.STRING: self.level = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "code=None,): self.name = name self.code = code def read(self, iprot): if iprot._fast_decode is", "- uriFragments - parameters - body \"\"\" def __init__(self, method=None, headers=None, uriFragments=None, parameters=None,", "= Log() _elem61.read(iprot) self.logs.append(_elem61) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self,", "def __init__(self, http=None, rpc=None,): self.http = http 
self.rpc = rpc def read(self, iprot):", "= categoryId self.status = status self.name = name self.email = email self.points =", "= headers self.body = body def read(self, iprot): if iprot._fast_decode is not None", "- userId - status - name - url - appKey - scopes -", "None: oprot.writeFieldBegin('categoryId', TType.I64, 3) oprot.writeI64(self.categoryId) oprot.writeFieldEnd() if self.status is not None: oprot.writeFieldBegin('status', TType.I32,", "else self.url) oprot.writeFieldEnd() if self.appKey is not None: oprot.writeFieldBegin('appKey', TType.STRING, 6) oprot.writeString(self.appKey.encode('utf-8') if", "self.message) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' %", "self.events.append(_elem55) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.logs", "oprot.writeString(self.eventName.encode('utf-8') if sys.version_info[0] == 2 else self.eventName) oprot.writeFieldEnd() if self.data is not None:", "== TType.STRING: self.method = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else:", "_val29 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: self.body", "self.app.read(iprot) else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRUCT: self.user =", "eventName self.data = data def read(self, iprot): if iprot._fast_decode is not None and", "iprot.readString() self.config[_key5] = _val6 iprot.readMapEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self,", "message=None,): self.success = success self.message = message def read(self, iprot): if iprot._fast_decode is", "[] (_etype39, _size36) = iprot.readListBegin() for _i40 in range(_size36): _elem41 = iprot.readString().decode('utf-8', errors='replace')", "'UTF8', None, ), # 5 ) all_structs.append(RpcRequest) RpcRequest.thrift_spec = ( None, # 
0", "elif fid == 4: if ftype == TType.STRUCT: self.user = User() self.user.read(iprot) else:", "TType.STRUCT: self.app = App() self.app.read(iprot) else: iprot.skip(ftype) elif fid == 4: if ftype", "not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Response') if", "headers self.body = body def read(self, iprot): if iprot._fast_decode is not None and", "all_structs.append(User) User.thrift_spec = ( None, # 0 (1, TType.I64, 'id', None, None, ),", "\"\"\" Attributes: - response - events - logs \"\"\" def __init__(self, response=None, events=None,", "is not None: oprot.writeFieldBegin('headers', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers)) for kiter30, viter31 in", "oprot.writeListBegin(TType.STRING, len(self.parameters)) for iter49 in self.parameters: oprot.writeString(iter49.encode('utf-8') if sys.version_info[0] == 2 else iter49)", "(3, TType.MAP, 'config', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3 )", "return oprot.writeStructBegin('Event') if self.eventName is not None: oprot.writeFieldBegin('eventName', TType.STRING, 1) oprot.writeString(self.eventName.encode('utf-8') if sys.version_info[0]", "if ftype == TType.STRING: self.message = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "self.user = User() self.user.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot):", "(7, TType.I32, 'points', None, None, ), # 7 ) all_structs.append(Result) Result.thrift_spec = (", "None, ), # 2 (3, TType.I64, 'categoryId', None, None, ), # 3 (4,", "== other) class Request(object): \"\"\" Attributes: - http - rpc \"\"\" def __init__(self,", "TType.STRING: self.body = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype)", "TType.BOOL: 
self.success = iprot.readBool() else: iprot.skip(ftype) elif fid == 2: if ftype ==", "else self.appKey) oprot.writeFieldEnd() if self.scopes is not None: oprot.writeFieldBegin('scopes', TType.LIST, 7) oprot.writeListBegin(TType.STRING, len(self.scopes))", "== 3: if ftype == TType.I32: self.status = iprot.readI32() else: iprot.skip(ftype) elif fid", "__init__(self, name=None, type=None, config=None,): self.name = name self.type = type self.config = config", "False), None, ), # 8 ) all_structs.append(User) User.thrift_spec = ( None, # 0", "None: oprot.writeFieldBegin('appKey', TType.STRING, 6) oprot.writeString(self.appKey.encode('utf-8') if sys.version_info[0] == 2 else self.appKey) oprot.writeFieldEnd() if", "3 (4, TType.MAP, 'parameters', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 4", "self.config.items(): oprot.writeString(kiter7.encode('utf-8') if sys.version_info[0] == 2 else kiter7) oprot.writeString(viter8.encode('utf-8') if sys.version_info[0] == 2", "None: oprot.writeFieldBegin('arguments', TType.STRING, 1) oprot.writeString(self.arguments.encode('utf-8') if sys.version_info[0] == 2 else self.arguments) oprot.writeFieldEnd() oprot.writeFieldStop()", "== other.__dict__ def __ne__(self, other): return not (self == other) class Log(object): \"\"\"", "iprot.skip(ftype) elif fid == 3: if ftype == TType.STRUCT: self.context = Context() self.context.read(iprot)", "0 (1, TType.STRING, 'level', 'UTF8', None, ), # 1 (2, TType.STRING, 'message', 'UTF8',", "fid == 5: if ftype == TType.STRING: self.body = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "oprot.writeI64(self.categoryId) oprot.writeFieldEnd() if self.status is not None: oprot.writeFieldBegin('status', TType.I32, 4) oprot.writeI32(self.status) oprot.writeFieldEnd() if", "<filename>worker/ttypes.py # # Autogenerated by Thrift Compiler (0.14.2) # # DO NOT EDIT", "False), None, ), # 3 ) all_structs.append(Response) Response.thrift_spec = ( None, # 0", "None 
and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Connection') if self.name", "return not (self == other) class App(object): \"\"\" Attributes: - id - userId", "viter33 in self.uriFragments.items(): oprot.writeString(kiter32.encode('utf-8') if sys.version_info[0] == 2 else kiter32) oprot.writeString(viter33.encode('utf-8') if sys.version_info[0]", "# 2 ) all_structs.append(Connection) Connection.thrift_spec = ( None, # 0 (1, TType.STRING, 'name',", "import TProtocolException from thrift.TRecursive import fix_spec import sys from thrift.transport import TTransport all_structs", "== 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 6: if ftype ==", "self.appKey = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif", "ftype == TType.STRUCT: self.context = Context() self.context.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd()", "= iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.scopes.append(_elem41) iprot.readListEnd() else: iprot.skip(ftype)", "= code def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport)", "if sys.version_info[0] == 2 else kiter30) oprot.writeString(viter31.encode('utf-8') if sys.version_info[0] == 2 else viter31)", "fid == 8: if ftype == TType.LIST: self.parameters = [] (_etype45, _size42) =", "None: oprot.writeFieldBegin('name', TType.STRING, 4) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if", "TType.STOP: break if fid == 1: if ftype == TType.BOOL: self.success = iprot.readBool()", "sys.version_info[0] == 2 else iprot.readString() self.uriFragments[_key21] = _val22 iprot.readMapEnd() else: iprot.skip(ftype) elif fid", 
"None: oprot.writeFieldBegin('events', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.events)) for iter62 in self.events: iter62.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd()", "is not None: oprot.writeFieldBegin('level', TType.STRING, 1) oprot.writeString(self.level.encode('utf-8') if sys.version_info[0] == 2 else self.level)", "= ( None, # 0 (1, TType.BOOL, 'success', None, None, ), # 1", "TType.STRING, len(self.headers)) for kiter30, viter31 in self.headers.items(): oprot.writeString(kiter30.encode('utf-8') if sys.version_info[0] == 2 else", "_vtype66, _size64) = iprot.readMapBegin() for _i68 in range(_size64): _key69 = iprot.readString().decode('utf-8', errors='replace') if", "oprot.writeFieldBegin('code', TType.STRING, 2) oprot.writeString(self.code.encode('utf-8') if sys.version_info[0] == 2 else self.code) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd()", "self.userId = userId self.status = status self.name = name self.url = url self.appKey", "RpcRequest() self.rpc.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode", "( None, # 0 (1, TType.I64, 'routeId', None, None, ), # 1 (2,", "is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype,", "headers - uriFragments - parameters - body \"\"\" def __init__(self, method=None, headers=None, uriFragments=None,", "# # options string: py # from thrift.Thrift import TType, TMessageType, TFrozenDict, TException,", "self.body is not None: oprot.writeFieldBegin('body', TType.STRING, 3) oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] == 2 else", "range(_size64): _key69 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val70 =", "data=None,): self.eventName = eventName self.data = data def read(self, iprot): if iprot._fast_decode 
is", "(2, TType.STRING, 'baseUrl', 'UTF8', None, ), # 2 (3, TType.STRUCT, 'app', [App, None],", "is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Context') if self.routeId is not None:", "appKey=None, scopes=None, parameters=None,): self.id = id self.userId = userId self.status = status self.name", "iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val6 = iprot.readString().decode('utf-8', errors='replace') if", "iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is", "iprot.readListBegin() for _i40 in range(_size36): _elem41 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "None: oprot.writeFieldBegin('user', TType.STRUCT, 4) self.user.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self):", "fid == 2: if ftype == TType.STRING: self.baseUrl = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "(1, TType.I64, 'id', None, None, ), # 1 (2, TType.I64, 'roleId', None, None,", "TType.STRUCT, 'rpc', [RpcRequest, None], None, ), # 2 ) all_structs.append(HttpRequest) HttpRequest.thrift_spec = (", "== TType.MAP: self.parameters = {} (_ktype24, _vtype25, _size23) = iprot.readMapBegin() for _i27 in", "None, ), # 1 ) all_structs.append(Context) Context.thrift_spec = ( None, # 0 (1,", "== 8: if ftype == TType.LIST: self.parameters = [] (_etype45, _size42) = iprot.readListBegin()", "id self.roleId = roleId self.categoryId = categoryId self.status = status self.name = name", "self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Event') if self.eventName is not", "[self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Log') if self.level is not None: 
oprot.writeFieldBegin('level', TType.STRING, 1) oprot.writeString(self.level.encode('utf-8')", "ftype == TType.I32: self.status = iprot.readI32() else: iprot.skip(ftype) elif fid == 4: if", "iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.body =", "oprot.writeString(self.email.encode('utf-8') if sys.version_info[0] == 2 else self.email) oprot.writeFieldEnd() if self.points is not None:", "__init__(self, id=None, roleId=None, categoryId=None, status=None, name=None, email=None, points=None,): self.id = id self.roleId =", "None], None, ), # 3 ) all_structs.append(Request) Request.thrift_spec = ( None, # 0", "\"\"\" Attributes: - action - request - context \"\"\" def __init__(self, action=None, request=None,", "== 2: if ftype == TType.STRING: self.code = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "if fid == 1: if ftype == TType.STRUCT: self.http = HttpRequest() self.http.read(iprot) else:", "- message \"\"\" def __init__(self, level=None, message=None,): self.level = level self.message = message", "iprot.skip(ftype) elif fid == 7: if ftype == TType.LIST: self.scopes = [] (_etype39,", "self.arguments = arguments def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans,", "None: oprot.writeFieldBegin('status', TType.I32, 3) oprot.writeI32(self.status) oprot.writeFieldEnd() if self.name is not None: oprot.writeFieldBegin('name', TType.STRING,", "other) class Response(object): \"\"\" Attributes: - statusCode - headers - body \"\"\" def", "- arguments \"\"\" def __init__(self, arguments=None,): self.arguments = arguments def read(self, iprot): if", "sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype", "None, ), # 3 (4, TType.STRING, 'name', 'UTF8', None, ), # 4 (5,", "TType.I32: self.points = iprot.readI32() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot):", 
"not None: oprot.writeFieldBegin('rpc', TType.STRUCT, 2) self.rpc.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def", "TType.I64, 3) oprot.writeI64(self.categoryId) oprot.writeFieldEnd() if self.status is not None: oprot.writeFieldBegin('status', TType.I32, 4) oprot.writeI32(self.status)", "oprot.writeListEnd() oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.LIST, 8) oprot.writeListBegin(TType.STRING, len(self.parameters)) for", "Event.thrift_spec = ( None, # 0 (1, TType.STRING, 'eventName', 'UTF8', None, ), #", "thrift.protocol.TProtocol import TProtocolException from thrift.TRecursive import fix_spec import sys from thrift.transport import TTransport", "== other) class RpcRequest(object): \"\"\" Attributes: - arguments \"\"\" def __init__(self, arguments=None,): self.arguments", "== TType.STOP: break if fid == 1: if ftype == TType.STRING: self.name =", "), # 1 (2, TType.STRING, 'message', 'UTF8', None, ), # 2 ) all_structs.append(Connection)", "status self.name = name self.email = email self.points = points def read(self, iprot):", "self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Request') if self.http is not", "self.parameters = parameters def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans,", "if ftype == TType.STRUCT: self.response = Response() self.response.read(iprot) else: iprot.skip(ftype) elif fid ==", "TType.LIST, 'events', (TType.STRUCT, [Event, None], False), None, ), # 2 (3, TType.LIST, 'logs',", "None, ), # 6 (7, TType.LIST, 'scopes', (TType.STRING, 'UTF8', False), None, ), #", "self.request is not None: oprot.writeFieldBegin('request', TType.STRUCT, 2) self.request.write(oprot) oprot.writeFieldEnd() if self.context is not", "return def __repr__(self): L = ['%s=%r' % (key, value) for key, value in", "if 
sys.version_info[0] == 2 else iprot.readString() _val29 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "TType.STRING, 2) oprot.writeString(self.data.encode('utf-8') if sys.version_info[0] == 2 else self.data) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def", "None, ), # 2 ) all_structs.append(Execute) Execute.thrift_spec = ( None, # 0 (1,", "roleId - categoryId - status - name - email - points \"\"\" def", "sys.version_info[0] == 2 else iprot.readString() _val6 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "2 else self.name) oprot.writeFieldEnd() if self.email is not None: oprot.writeFieldBegin('email', TType.STRING, 6) oprot.writeString(self.email.encode('utf-8')", "= roleId self.categoryId = categoryId self.status = status self.name = name self.email =", "None: oprot.writeFieldBegin('headers', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers)) for kiter30, viter31 in self.headers.items(): oprot.writeString(kiter30.encode('utf-8')", "Execute(object): \"\"\" Attributes: - action - request - context \"\"\" def __init__(self, action=None,", "== TType.I64: self.id = iprot.readI64() else: iprot.skip(ftype) elif fid == 2: if ftype", "# 3 ) all_structs.append(Action) Action.thrift_spec = ( None, # 0 (1, TType.STRING, 'name',", "iprot.readString() self.headers[_key69] = _val70 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 3: if ftype", "1: if ftype == TType.I64: self.id = iprot.readI64() else: iprot.skip(ftype) elif fid ==", "( None, # 0 (1, TType.STRUCT, 'response', [Response, None], None, ), # 1", "in self.events: iter62.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.logs is not None: oprot.writeFieldBegin('logs', TType.LIST, 3)", "TType.STRING, 6) oprot.writeString(self.email.encode('utf-8') if sys.version_info[0] == 2 else self.email) oprot.writeFieldEnd() if self.points is", "== other) class App(object): \"\"\" 
Attributes: - id - userId - status -", "TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.response = Response()", "not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Event') if", "2 else iprot.readString() _val6 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "== 1: if ftype == TType.STRUCT: self.http = HttpRequest() self.http.read(iprot) else: iprot.skip(ftype) elif", "def __init__(self, statusCode=None, headers=None, body=None,): self.statusCode = statusCode self.headers = headers self.body =", "oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.uriFragments)) for kiter32, viter33 in self.uriFragments.items(): oprot.writeString(kiter32.encode('utf-8') if sys.version_info[0] == 2", "iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.request = Request() self.request.read(iprot)", "None: oprot.writeFieldBegin('context', TType.STRUCT, 3) self.context.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self):", "== TType.STRUCT: self.user = User() self.user.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def", "(_ktype1, _vtype2, _size0) = iprot.readMapBegin() for _i4 in range(_size0): _key5 = iprot.readString().decode('utf-8', errors='replace')", "== TType.STRUCT: self.request = Request() self.request.read(iprot) else: iprot.skip(ftype) elif fid == 3: if", "thrift.TRecursive import fix_spec import sys from thrift.transport import TTransport all_structs = [] class", "[User, None], None, ), # 4 ) all_structs.append(App) App.thrift_spec = ( None, #", "self.eventName) oprot.writeFieldEnd() if self.data is not None: oprot.writeFieldBegin('data', TType.STRING, 2) oprot.writeString(self.data.encode('utf-8') if sys.version_info[0]", "0 (1, TType.BOOL, 'success', 
None, None, ), # 1 (2, TType.STRING, 'message', 'UTF8',", "self.status = status self.name = name self.url = url self.appKey = appKey self.scopes", "1: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "elif fid == 5: if ftype == TType.STRING: self.body = iprot.readString().decode('utf-8', errors='replace') if", "== 2: if ftype == TType.STRING: self.type = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "self.url) oprot.writeFieldEnd() if self.appKey is not None: oprot.writeFieldBegin('appKey', TType.STRING, 6) oprot.writeString(self.appKey.encode('utf-8') if sys.version_info[0]", "== TType.I32: self.statusCode = iprot.readI32() else: iprot.skip(ftype) elif fid == 2: if ftype", "ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.level", "uriFragments self.parameters = parameters self.body = body def read(self, iprot): if iprot._fast_decode is", "iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.code =", "not None: oprot.writeFieldBegin('headers', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers)) for kiter71, viter72 in self.headers.items():", "== 2 else iprot.readString() self.headers[_key69] = _val70 iprot.readMapEnd() else: iprot.skip(ftype) elif fid ==", "TType.STRING, 'email', 'UTF8', None, ), # 6 (7, TType.I32, 'points', None, None, ),", "self.statusCode = iprot.readI32() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.MAP:", "2 else self.name) oprot.writeFieldEnd() if self.type is not None: oprot.writeFieldBegin('type', TType.STRING, 2) oprot.writeString(self.type.encode('utf-8')", "3 (4, TType.I32, 'status', None, None, ), # 4 (5, TType.STRING, 'name', 'UTF8',", "2) self.rpc.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r'", "ftype == TType.I32: self.statusCode = 
iprot.readI32() else: iprot.skip(ftype) elif fid == 2: if", "TType.MAP, 'parameters', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 4 (5, TType.STRING,", "== other) class Result(object): \"\"\" Attributes: - response - events - logs \"\"\"", "fid == 1: if ftype == TType.I64: self.id = iprot.readI64() else: iprot.skip(ftype) elif", "[self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Event') if self.eventName is not None: oprot.writeFieldBegin('eventName', TType.STRING, 1) oprot.writeString(self.eventName.encode('utf-8')", "iprot.skip(ftype) elif fid == 8: if ftype == TType.LIST: self.parameters = [] (_etype45,", "self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype ==", "# 1 ) all_structs.append(Context) Context.thrift_spec = ( None, # 0 (1, TType.I64, 'routeId',", "if sys.version_info[0] == 2 else viter33) oprot.writeMapEnd() oprot.writeFieldEnd() if self.parameters is not None:", "), # 1 (2, TType.STRUCT, 'request', [Request, None], None, ), # 2 (3,", "None, # 0 (1, TType.STRING, 'name', 'UTF8', None, ), # 1 (2, TType.STRING,", "TType.STRING: self.email = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype)", "None, None, ), # 1 (2, TType.STRING, 'message', 'UTF8', None, ), # 2", "), # 2 (3, TType.I32, 'status', None, None, ), # 3 (4, TType.STRING,", "== TType.MAP: self.uriFragments = {} (_ktype17, _vtype18, _size16) = iprot.readMapBegin() for _i20 in", "- points \"\"\" def __init__(self, id=None, roleId=None, categoryId=None, status=None, name=None, email=None, points=None,): self.id", "False), None, ), # 7 (8, TType.LIST, 'parameters', (TType.STRING, 'UTF8', False), None, ),", "2 else self.arguments) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L =", "None, None, ), # 1 (2, TType.STRING, 'baseUrl', 'UTF8', None, ), # 2", 
"TType.MAP: self.uriFragments = {} (_ktype17, _vtype18, _size16) = iprot.readMapBegin() for _i20 in range(_size16):", "is not None: oprot.writeFieldBegin('url', TType.STRING, 5) oprot.writeString(self.url.encode('utf-8') if sys.version_info[0] == 2 else self.url)", "), # 2 (3, TType.MAP, 'uriFragments', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ),", "# 4 ) all_structs.append(App) App.thrift_spec = ( None, # 0 (1, TType.I64, 'id',", "body \"\"\" def __init__(self, statusCode=None, headers=None, body=None,): self.statusCode = statusCode self.headers = headers", "message \"\"\" def __init__(self, success=None, message=None,): self.success = success self.message = message def", "not (self == other) class Execute(object): \"\"\" Attributes: - action - request -", "self.categoryId = categoryId self.status = status self.name = name self.email = email self.points", "TType.STOP: break if fid == 1: if ftype == TType.STRING: self.action = iprot.readString().decode('utf-8',", "if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.code is not None: oprot.writeFieldBegin('code',", "= [] (_etype59, _size56) = iprot.readListBegin() for _i60 in range(_size56): _elem61 = Log()", "- http - rpc \"\"\" def __init__(self, http=None, rpc=None,): self.http = http self.rpc", "- baseUrl - app - user \"\"\" def __init__(self, routeId=None, baseUrl=None, app=None, user=None,):", "Request.thrift_spec = ( None, # 0 (1, TType.STRUCT, 'http', [HttpRequest, None], None, ),", "TType.STRING: self.message = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype)", "TType.STRING, 'baseUrl', 'UTF8', None, ), # 2 (3, TType.STRUCT, 'app', [App, None], None,", "is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot,", "== 2 else viter8) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() 
oprot.writeStructEnd() def validate(self): return def __repr__(self):", "# 1 (2, TType.LIST, 'events', (TType.STRUCT, [Event, None], False), None, ), # 2", "is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Event') if self.eventName is not None:", "oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.LIST, 8) oprot.writeListBegin(TType.STRING, len(self.parameters)) for iter49", "# 0 (1, TType.STRING, 'action', 'UTF8', None, ), # 1 (2, TType.STRUCT, 'request',", "else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRUCT: self.context = Context()", "else iprot.readString() else: iprot.skip(ftype) elif fid == 7: if ftype == TType.LIST: self.scopes", "self.data = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) else:", "[RpcRequest, None], None, ), # 2 ) all_structs.append(HttpRequest) HttpRequest.thrift_spec = ( None, #", "userId self.status = status self.name = name self.url = url self.appKey = appKey", "'statusCode', None, None, ), # 1 (2, TType.MAP, 'headers', (TType.STRING, 'UTF8', TType.STRING, 'UTF8',", "oprot.writeString(viter31.encode('utf-8') if sys.version_info[0] == 2 else viter31) oprot.writeMapEnd() oprot.writeFieldEnd() if self.uriFragments is not", "not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Result') if self.response is not None: oprot.writeFieldBegin('response',", "None: oprot.writeFieldBegin('body', TType.STRING, 5) oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] == 2 else self.body) oprot.writeFieldEnd() oprot.writeFieldStop()", "2 else kiter30) oprot.writeString(viter31.encode('utf-8') if sys.version_info[0] == 2 else viter31) oprot.writeMapEnd() oprot.writeFieldEnd() if", "'UTF8', None, ), # 5 (6, TType.STRING, 'appKey', 'UTF8', None, ), # 6", "routeId 
self.baseUrl = baseUrl self.app = app self.user = user def read(self, iprot):", "== 4: if ftype == TType.STRUCT: self.user = User() self.user.read(iprot) else: iprot.skip(ftype) else:", "iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None", "self.app = App() self.app.read(iprot) else: iprot.skip(ftype) elif fid == 4: if ftype ==", "'scopes', (TType.STRING, 'UTF8', False), None, ), # 7 (8, TType.LIST, 'parameters', (TType.STRING, 'UTF8',", ") all_structs.append(Execute) Execute.thrift_spec = ( None, # 0 (1, TType.STRING, 'action', 'UTF8', None,", "# 6 (7, TType.I32, 'points', None, None, ), # 7 ) all_structs.append(Result) Result.thrift_spec", "self.rpc = rpc def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans,", "_size23) = iprot.readMapBegin() for _i27 in range(_size23): _key28 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) =", "break if fid == 1: if ftype == TType.STRUCT: self.http = HttpRequest() self.http.read(iprot)", "elif fid == 5: if ftype == TType.STRING: self.url = iprot.readString().decode('utf-8', errors='replace') if", "TType.STRING, 'body', 'UTF8', None, ), # 5 ) all_structs.append(RpcRequest) RpcRequest.thrift_spec = ( None,", "6 (7, TType.LIST, 'scopes', (TType.STRING, 'UTF8', False), None, ), # 7 (8, TType.LIST,", "in range(_size64): _key69 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val70", "2 else iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.MAP:", "oprot.writeFieldBegin('name', TType.STRING, 5) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.email", "if ftype == TType.MAP: self.config = {} 
(_ktype1, _vtype2, _size0) = iprot.readMapBegin() for", "not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Request') if self.http is not None: oprot.writeFieldBegin('http',", "_i27 in range(_size23): _key28 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "def __ne__(self, other): return not (self == other) class Result(object): \"\"\" Attributes: -", "1) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.code is not", "TType.STRING, 2) oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def", "name - type - config \"\"\" def __init__(self, name=None, type=None, config=None,): self.name =", "== TType.STOP: break if fid == 1: if ftype == TType.STRING: self.level =", "if fid == 1: if ftype == TType.STRUCT: self.response = Response() self.response.read(iprot) else:", "if ftype == TType.I32: self.points = iprot.readI32() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd()", "1) self.response.write(oprot) oprot.writeFieldEnd() if self.events is not None: oprot.writeFieldBegin('events', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.events))", "not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('RpcRequest') if self.arguments is not None: oprot.writeFieldBegin('arguments',", "__init__(self, eventName=None, data=None,): self.eventName = eventName self.data = data def read(self, iprot): if", "sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.code is not None: oprot.writeFieldBegin('code', TType.STRING,", "None, ), # 3 ) all_structs.append(Request) Request.thrift_spec = ( None, # 0 (1,", "if sys.version_info[0] == 2 else self.name) 
oprot.writeFieldEnd() if self.url is not None: oprot.writeFieldBegin('url',", "None: oprot.writeFieldBegin('baseUrl', TType.STRING, 2) oprot.writeString(self.baseUrl.encode('utf-8') if sys.version_info[0] == 2 else self.baseUrl) oprot.writeFieldEnd() if", "TType.LIST: self.parameters = [] (_etype45, _size42) = iprot.readListBegin() for _i46 in range(_size42): _elem47", "iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec", "= userId self.status = status self.name = name self.url = url self.appKey =", "6) oprot.writeString(self.email.encode('utf-8') if sys.version_info[0] == 2 else self.email) oprot.writeFieldEnd() if self.points is not", "3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.uriFragments)) for kiter32, viter33 in self.uriFragments.items(): oprot.writeString(kiter32.encode('utf-8') if sys.version_info[0] ==", "self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class Response(object):", "TType.STRUCT, 'app', [App, None], None, ), # 3 (4, TType.STRUCT, 'user', [User, None],", "other) class Execute(object): \"\"\" Attributes: - action - request - context \"\"\" def", "( None, # 0 (1, TType.STRING, 'arguments', 'UTF8', None, ), # 1 )", "sys.version_info[0] == 2 else self.baseUrl) oprot.writeFieldEnd() if self.app is not None: oprot.writeFieldBegin('app', TType.STRUCT,", "all_structs.append(Execute) Execute.thrift_spec = ( None, # 0 (1, TType.STRING, 'action', 'UTF8', None, ),", "if self.scopes is not None: oprot.writeFieldBegin('scopes', TType.LIST, 7) oprot.writeListBegin(TType.STRING, len(self.scopes)) for iter48 in", "return not (self == other) class Connection(object): \"\"\" Attributes: - name - type", "in range(_size50): _elem55 = Event() _elem55.read(iprot) self.events.append(_elem55) iprot.readListEnd() else: iprot.skip(ftype) elif fid ==", "break if fid == 1: if ftype == TType.STRING: self.eventName = 
iprot.readString().decode('utf-8', errors='replace')", "RpcRequest.thrift_spec = ( None, # 0 (1, TType.STRING, 'arguments', 'UTF8', None, ), #", "TType.I64, 'id', None, None, ), # 1 (2, TType.I64, 'roleId', None, None, ),", "elif fid == 2: if ftype == TType.STRUCT: self.request = Request() self.request.read(iprot) else:", "self.app = app self.user = user def read(self, iprot): if iprot._fast_decode is not", "level - message \"\"\" def __init__(self, level=None, message=None,): self.level = level self.message =", "other): return not (self == other) class Context(object): \"\"\" Attributes: - routeId -", "= iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val70 = iprot.readString().decode('utf-8', errors='replace')", "None, ), # 1 (2, TType.I64, 'roleId', None, None, ), # 2 (3,", "Result.thrift_spec = ( None, # 0 (1, TType.STRUCT, 'response', [Response, None], None, ),", "TType.STRING, 5) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.email is", "if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def", "oprot.writeStructBegin('Response') if self.statusCode is not None: oprot.writeFieldBegin('statusCode', TType.I32, 1) oprot.writeI32(self.statusCode) oprot.writeFieldEnd() if self.headers", "oprot.writeI32(self.statusCode) oprot.writeFieldEnd() if self.headers is not None: oprot.writeFieldBegin('headers', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers))", "== 2 else self.email) oprot.writeFieldEnd() if self.points is not None: oprot.writeFieldBegin('points', TType.I32, 7)", "4) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.url is not", "2 else iprot.readString() _val15 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 
else iprot.readString()", "logs=None,): self.response = response self.events = events self.logs = logs def read(self, iprot):", "if ftype == TType.STRING: self.code = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "__ne__(self, other): return not (self == other) class Context(object): \"\"\" Attributes: - routeId", "- name - type - config \"\"\" def __init__(self, name=None, type=None, config=None,): self.name", "return not (self == other) class Result(object): \"\"\" Attributes: - response - events", "= routeId self.baseUrl = baseUrl self.app = app self.user = user def read(self,", "self.thrift_spec])) return oprot.writeStructBegin('RpcRequest') if self.arguments is not None: oprot.writeFieldBegin('arguments', TType.STRING, 1) oprot.writeString(self.arguments.encode('utf-8') if", "', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def", "ftype == TType.STRING: self.message = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "# 6 (7, TType.LIST, 'scopes', (TType.STRING, 'UTF8', False), None, ), # 7 (8,", "== 2 else viter72) oprot.writeMapEnd() oprot.writeFieldEnd() if self.body is not None: oprot.writeFieldBegin('body', TType.STRING,", "email self.points = points def read(self, iprot): if iprot._fast_decode is not None and", "and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Action') if self.name is", "# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU", "oprot.writeFieldEnd() if self.logs is not None: oprot.writeFieldBegin('logs', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.logs)) for iter63", "self.thrift_spec])) return oprot.writeStructBegin('HttpRequest') if self.method is not None: oprot.writeFieldBegin('method', TType.STRING, 1) oprot.writeString(self.method.encode('utf-8') if", "== 
TType.STRUCT: self.app = App() self.app.read(iprot) else: iprot.skip(ftype) elif fid == 4: if", "None, ), # 1 (2, TType.MAP, 'headers', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None,", "if sys.version_info[0] == 2 else self.baseUrl) oprot.writeFieldEnd() if self.app is not None: oprot.writeFieldBegin('app',", "read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is", "not None: oprot.writeFieldBegin('parameters', TType.LIST, 8) oprot.writeListBegin(TType.STRING, len(self.parameters)) for iter49 in self.parameters: oprot.writeString(iter49.encode('utf-8') if", "self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class Context(object):", "TType.STRING, 'message', 'UTF8', None, ), # 2 ) all_structs.append(Connection) Connection.thrift_spec = ( None,", "oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers)) for kiter30, viter31 in self.headers.items(): oprot.writeString(kiter30.encode('utf-8') if sys.version_info[0] == 2", "oprot.writeFieldBegin('app', TType.STRUCT, 3) self.app.write(oprot) oprot.writeFieldEnd() if self.user is not None: oprot.writeFieldBegin('user', TType.STRUCT, 4)", "if sys.version_info[0] == 2 else self.message) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def", "Response.thrift_spec = ( None, # 0 (1, TType.I32, 'statusCode', None, None, ), #", "self.id = iprot.readI64() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.I64:", "oprot.writeString(self.action.encode('utf-8') if sys.version_info[0] == 2 else self.action) oprot.writeFieldEnd() if self.request is not None:", "User() self.user.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode", "2) self.request.write(oprot) oprot.writeFieldEnd() if self.context is not None: oprot.writeFieldBegin('context', 
TType.STRUCT, 3) self.context.write(oprot) oprot.writeFieldEnd()", "other.__dict__ def __ne__(self, other): return not (self == other) class HttpRequest(object): \"\"\" Attributes:", "success=None, message=None,): self.success = success self.message = message def read(self, iprot): if iprot._fast_decode", "== TType.STOP: break if fid == 1: if ftype == TType.BOOL: self.success =", "headers self.uriFragments = uriFragments self.parameters = parameters self.body = body def read(self, iprot):", "if ftype == TType.STRING: self.arguments = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "else self.name) oprot.writeFieldEnd() if self.email is not None: oprot.writeFieldBegin('email', TType.STRING, 6) oprot.writeString(self.email.encode('utf-8') if", "_vtype25, _size23) = iprot.readMapBegin() for _i27 in range(_size23): _key28 = iprot.readString().decode('utf-8', errors='replace') if", "2: if ftype == TType.STRUCT: self.rpc = RpcRequest() self.rpc.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype)", "== other) class User(object): \"\"\" Attributes: - id - roleId - categoryId -", "__ne__(self, other): return not (self == other) class Event(object): \"\"\" Attributes: - eventName", "iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.scopes.append(_elem41) iprot.readListEnd() else: iprot.skip(ftype) elif", "viter31) oprot.writeMapEnd() oprot.writeFieldEnd() if self.uriFragments is not None: oprot.writeFieldBegin('uriFragments', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING,", "def __init__(self, name=None, type=None, config=None,): self.name = name self.type = type self.config =", "== 2 else iprot.readString() _val22 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "if ftype == TType.LIST: self.scopes = [] (_etype39, _size36) = iprot.readListBegin() for _i40", "\"\"\" Attributes: - id - userId - status - name - url 
-", "Request(object): \"\"\" Attributes: - http - rpc \"\"\" def __init__(self, http=None, rpc=None,): self.http", "iter49 in self.parameters: oprot.writeString(iter49.encode('utf-8') if sys.version_info[0] == 2 else iter49) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop()", "# 2 (3, TType.STRING, 'body', 'UTF8', None, ), # 3 ) all_structs.append(Event) Event.thrift_spec", "App(object): \"\"\" Attributes: - id - userId - status - name - url", "# 2 (3, TType.MAP, 'config', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), #", "(self == other) class Action(object): \"\"\" Attributes: - name - code \"\"\" def", "# 2 (3, TType.STRUCT, 'app', [App, None], None, ), # 3 (4, TType.STRUCT,", "ftype == TType.STRUCT: self.user = User() self.user.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd()", "TType.I32, 7) oprot.writeI32(self.points) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L =", "fid == 3: if ftype == TType.STRING: self.body = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "__init__(self, success=None, message=None,): self.success = success self.message = message def read(self, iprot): if", "oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' %", "self.headers.items(): oprot.writeString(kiter30.encode('utf-8') if sys.version_info[0] == 2 else kiter30) oprot.writeString(viter31.encode('utf-8') if sys.version_info[0] == 2", "= iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val15 = iprot.readString().decode('utf-8', errors='replace')", "__init__(self, arguments=None,): self.arguments = arguments def read(self, iprot): if iprot._fast_decode is not None", "in self.config.items(): oprot.writeString(kiter7.encode('utf-8') if 
sys.version_info[0] == 2 else kiter7) oprot.writeString(viter8.encode('utf-8') if sys.version_info[0] ==", "not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Action') if self.name is not None: oprot.writeFieldBegin('name',", "self.code is not None: oprot.writeFieldBegin('code', TType.STRING, 2) oprot.writeString(self.code.encode('utf-8') if sys.version_info[0] == 2 else", "is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Event')", "), # 5 (6, TType.STRING, 'email', 'UTF8', None, ), # 6 (7, TType.I32,", "class Connection(object): \"\"\" Attributes: - name - type - config \"\"\" def __init__(self,", "other.__dict__ def __ne__(self, other): return not (self == other) class App(object): \"\"\" Attributes:", "ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.response", "TType.STRING, 'url', 'UTF8', None, ), # 5 (6, TType.STRING, 'appKey', 'UTF8', None, ),", "is not None: oprot.writeFieldBegin('parameters', TType.MAP, 4) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) for kiter34, viter35 in", "= rpc def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport)", "errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.parameters[_key28] = _val29 iprot.readMapEnd() else: iprot.skip(ftype)", "self.parameters[_key28] = _val29 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 5: if ftype ==", "else viter31) oprot.writeMapEnd() oprot.writeFieldEnd() if self.uriFragments is not None: oprot.writeFieldBegin('uriFragments', TType.MAP, 3) oprot.writeMapBegin(TType.STRING,", "fid == 1: if ftype == TType.STRING: self.method = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "if ftype == TType.I32: self.status = iprot.readI32() else: iprot.skip(ftype) elif fid 
== 5:", "in range(_size23): _key28 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val29", "TType.STRING: self.baseUrl = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype)", "oprot.writeListBegin(TType.STRING, len(self.scopes)) for iter48 in self.scopes: oprot.writeString(iter48.encode('utf-8') if sys.version_info[0] == 2 else iter48)", "None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('User') if self.id", "= iprot.readI32() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: self.name", "self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class App(object):", "elif fid == 2: if ftype == TType.MAP: self.headers = {} (_ktype10, _vtype11,", "2 (3, TType.STRUCT, 'context', [Context, None], None, ), # 3 ) all_structs.append(Request) Request.thrift_spec", "2 else self.message) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L =", "not (self == other) class Action(object): \"\"\" Attributes: - name - code \"\"\"", "3 (4, TType.STRING, 'name', 'UTF8', None, ), # 4 (5, TType.STRING, 'url', 'UTF8',", "False), None, ), # 4 (5, TType.STRING, 'body', 'UTF8', None, ), # 5", "else self.method) oprot.writeFieldEnd() if self.headers is not None: oprot.writeFieldBegin('headers', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING,", "Autogenerated by Thrift Compiler (0.14.2) # # DO NOT EDIT UNLESS YOU ARE", "% (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ',", "if self.context is not None: oprot.writeFieldBegin('context', TType.STRUCT, 3) self.context.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def", "== 2 else viter35) oprot.writeMapEnd() 
oprot.writeFieldEnd() if self.body is not None: oprot.writeFieldBegin('body', TType.STRING,", "self.name = name self.url = url self.appKey = appKey self.scopes = scopes self.parameters", "'UTF8', TType.STRING, 'UTF8', False), None, ), # 2 (3, TType.STRING, 'body', 'UTF8', None,", "TType.I64, 'categoryId', None, None, ), # 3 (4, TType.I32, 'status', None, None, ),", "TType.MAP, 'uriFragments', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3 (4, TType.MAP,", "request - context \"\"\" def __init__(self, action=None, request=None, context=None,): self.action = action self.request", "elif fid == 4: if ftype == TType.MAP: self.parameters = {} (_ktype24, _vtype25,", "(_etype53, _size50) = iprot.readListBegin() for _i54 in range(_size50): _elem55 = Event() _elem55.read(iprot) self.events.append(_elem55)", "and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Request') if self.http is", "oprot.writeFieldEnd() if self.context is not None: oprot.writeFieldBegin('context', TType.STRUCT, 3) self.context.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd()", "2) oprot.writeListBegin(TType.STRUCT, len(self.events)) for iter62 in self.events: iter62.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.logs is", "'message', 'UTF8', None, ), # 2 ) all_structs.append(Connection) Connection.thrift_spec = ( None, #", "else iprot.readString() else: iprot.skip(ftype) elif fid == 7: if ftype == TType.I32: self.points", "oprot.writeStructBegin('User') if self.id is not None: oprot.writeFieldBegin('id', TType.I64, 1) oprot.writeI64(self.id) oprot.writeFieldEnd() if self.roleId", "oprot.writeFieldBegin('user', TType.STRUCT, 4) self.user.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L", "- url - appKey - scopes - parameters \"\"\" def __init__(self, id=None, 
userId=None,", "3: if ftype == TType.LIST: self.logs = [] (_etype59, _size56) = iprot.readListBegin() for", "else self.name) oprot.writeFieldEnd() if self.type is not None: oprot.writeFieldBegin('type', TType.STRING, 2) oprot.writeString(self.type.encode('utf-8') if", "== TType.MAP: self.headers = {} (_ktype65, _vtype66, _size64) = iprot.readMapBegin() for _i68 in", "None, ), # 1 (2, TType.STRING, 'code', 'UTF8', None, ), # 2 )", "if ftype == TType.STRING: self.appKey = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "self.roleId = roleId self.categoryId = categoryId self.status = status self.name = name self.email", "- parameters \"\"\" def __init__(self, id=None, userId=None, status=None, name=None, url=None, appKey=None, scopes=None, parameters=None,):", "and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Result') if self.response is", "sys.version_info[0] == 2 else iprot.readString() _val29 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "iter49) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r'", "2 else iprot.readString() self.headers[_key69] = _val70 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 3:", "def __init__(self, method=None, headers=None, uriFragments=None, parameters=None, body=None,): self.method = method self.headers = headers", "), # 8 ) all_structs.append(User) User.thrift_spec = ( None, # 0 (1, TType.I64,", "User(object): \"\"\" Attributes: - id - roleId - categoryId - status - name", "all_structs.append(Context) Context.thrift_spec = ( None, # 0 (1, TType.I64, 'routeId', None, None, ),", "Attributes: - id - userId - status - name - url - appKey", "is not None: oprot.writeFieldBegin('http', TType.STRUCT, 1) self.http.write(oprot) oprot.writeFieldEnd() if self.rpc is not 
None:", "TType.STRUCT, 'request', [Request, None], None, ), # 2 (3, TType.STRUCT, 'context', [Context, None],", "sys.version_info[0] == 2 else viter8) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def", "(2, TType.STRING, 'data', 'UTF8', None, ), # 2 ) all_structs.append(Log) Log.thrift_spec = (", "response self.events = events self.logs = logs def read(self, iprot): if iprot._fast_decode is", "Action.thrift_spec = ( None, # 0 (1, TType.STRING, 'name', 'UTF8', None, ), #", "TType.STRUCT, 'response', [Response, None], None, ), # 1 (2, TType.LIST, 'events', (TType.STRUCT, [Event,", "self.scopes = scopes self.parameters = parameters def read(self, iprot): if iprot._fast_decode is not", "== other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(Message) Message.thrift_spec =", "self.code = code def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans,", "self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Response') if self.statusCode is not", "oprot.writeFieldEnd() if self.message is not None: oprot.writeFieldBegin('message', TType.STRING, 2) oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] ==", "2 else viter35) oprot.writeMapEnd() oprot.writeFieldEnd() if self.body is not None: oprot.writeFieldBegin('body', TType.STRING, 5)", "TType.STRING, 1) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.type is", "request=None, context=None,): self.action = action self.request = request self.context = context def read(self,", "iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.config[_key5] = _val6 iprot.readMapEnd() else:", "else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: self.url = 
iprot.readString().decode('utf-8',", "3: if ftype == TType.MAP: self.uriFragments = {} (_ktype17, _vtype18, _size16) = iprot.readMapBegin()", "self.context = context def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans,", "fid == 3: if ftype == TType.STRUCT: self.app = App() self.app.read(iprot) else: iprot.skip(ftype)", "elif fid == 7: if ftype == TType.LIST: self.scopes = [] (_etype39, _size36)", "return not (self == other) class HttpRequest(object): \"\"\" Attributes: - method - headers", "def __init__(self, arguments=None,): self.arguments = arguments def read(self, iprot): if iprot._fast_decode is not", "None: oprot.writeFieldBegin('routeId', TType.I64, 1) oprot.writeI64(self.routeId) oprot.writeFieldEnd() if self.baseUrl is not None: oprot.writeFieldBegin('baseUrl', TType.STRING,", "2) oprot.writeString(self.data.encode('utf-8') if sys.version_info[0] == 2 else self.data) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self):", "iprot.readI32() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is", "self.type is not None: oprot.writeFieldBegin('type', TType.STRING, 2) oprot.writeString(self.type.encode('utf-8') if sys.version_info[0] == 2 else", "oprot.writeI64(self.userId) oprot.writeFieldEnd() if self.status is not None: oprot.writeFieldBegin('status', TType.I32, 3) oprot.writeI32(self.status) oprot.writeFieldEnd() if", "if self.categoryId is not None: oprot.writeFieldBegin('categoryId', TType.I64, 3) oprot.writeI64(self.categoryId) oprot.writeFieldEnd() if self.status is", "- level - message \"\"\" def __init__(self, level=None, message=None,): self.level = level self.message", "self.name = name self.code = code def read(self, iprot): if iprot._fast_decode is not", "elif fid == 4: if ftype == TType.I32: self.status = iprot.readI32() else: iprot.skip(ftype)", "self.events: iter62.write(oprot) 
oprot.writeListEnd() oprot.writeFieldEnd() if self.logs is not None: oprot.writeFieldBegin('logs', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT,", "TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers)) for kiter71, viter72 in self.headers.items(): oprot.writeString(kiter71.encode('utf-8') if sys.version_info[0]", "= ( None, # 0 (1, TType.STRING, 'action', 'UTF8', None, ), # 1", "), # 2 ) all_structs.append(Log) Log.thrift_spec = ( None, # 0 (1, TType.STRING,", "break if fid == 1: if ftype == TType.I64: self.id = iprot.readI64() else:", "'parameters', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 4 (5, TType.STRING, 'body',", "TType.STRING, 'name', 'UTF8', None, ), # 1 (2, TType.STRING, 'type', 'UTF8', None, ),", "2: if ftype == TType.I64: self.userId = iprot.readI64() else: iprot.skip(ftype) elif fid ==", "if sys.version_info[0] == 2 else iprot.readString() _val70 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif", "if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT:", "# # Autogenerated by Thrift Compiler (0.14.2) # # DO NOT EDIT UNLESS", "__ne__(self, other): return not (self == other) class Request(object): \"\"\" Attributes: - http", "None, ), # 3 (4, TType.I32, 'status', None, None, ), # 4 (5,", "else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.logs = []", "4: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Context') if self.routeId is not None: oprot.writeFieldBegin('routeId', TType.I64, 1)", "if self.userId is not None: oprot.writeFieldBegin('userId', TType.I64, 2) oprot.writeI64(self.userId) oprot.writeFieldEnd() if 
self.status is", "= iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.headers[_key69] = _val70 iprot.readMapEnd()", "else iter49) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L =", "# 0 (1, TType.STRUCT, 'response', [Response, None], None, ), # 1 (2, TType.LIST,", "self.parameters = parameters self.body = body def read(self, iprot): if iprot._fast_decode is not", "fid == 2: if ftype == TType.LIST: self.events = [] (_etype53, _size50) =", "oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' %", "None: oprot.writeFieldBegin('action', TType.STRING, 1) oprot.writeString(self.action.encode('utf-8') if sys.version_info[0] == 2 else self.action) oprot.writeFieldEnd() if", "None, ), # 2 (3, TType.STRUCT, 'context', [Context, None], None, ), # 3", "not None: oprot.writeFieldBegin('config', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.config)) for kiter7, viter8 in self.config.items():", "= Context() self.context.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if", "is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('HttpRequest') if self.method is not None:", "self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Action') if self.name is not", "TType.LIST: self.logs = [] (_etype59, _size56) = iprot.readListBegin() for _i60 in range(_size56): _elem61", "not (self == other) class Connection(object): \"\"\" Attributes: - name - type -", "\"\"\" def __init__(self, statusCode=None, headers=None, body=None,): self.statusCode = statusCode self.headers = headers self.body", "# 4 
(5, TType.STRING, 'name', 'UTF8', None, ), # 5 (6, TType.STRING, 'email',", "2 else self.url) oprot.writeFieldEnd() if self.appKey is not None: oprot.writeFieldBegin('appKey', TType.STRING, 6) oprot.writeString(self.appKey.encode('utf-8')", "not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Execute') if", "= User() self.user.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if", "oprot.writeFieldEnd() if self.status is not None: oprot.writeFieldBegin('status', TType.I32, 3) oprot.writeI32(self.status) oprot.writeFieldEnd() if self.name", "iprot.readListBegin() for _i60 in range(_size56): _elem61 = Log() _elem61.read(iprot) self.logs.append(_elem61) iprot.readListEnd() else: iprot.skip(ftype)", "not None: oprot.writeFieldBegin('logs', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.logs)) for iter63 in self.logs: iter63.write(oprot) oprot.writeListEnd()", "is not None: oprot.writeFieldBegin('points', TType.I32, 7) oprot.writeI32(self.points) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return", "None, ), # 5 (6, TType.STRING, 'email', 'UTF8', None, ), # 6 (7,", "__repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return", "TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.http = HttpRequest()", "is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Request') if self.http is not None:", "(TType.STRING, 'UTF8', False), None, ), # 7 (8, TType.LIST, 'parameters', (TType.STRING, 'UTF8', False),", "'UTF8', TType.STRING, 'UTF8', False), None, ), # 3 (4, TType.MAP, 'parameters', (TType.STRING, 'UTF8',", "not (self == other) class Request(object): \"\"\" Attributes: - http - rpc \"\"\"", "[self.__class__, self.thrift_spec])) return 
oprot.writeStructBegin('Context') if self.routeId is not None: oprot.writeFieldBegin('routeId', TType.I64, 1) oprot.writeI64(self.routeId)", "self.name) oprot.writeFieldEnd() if self.type is not None: oprot.writeFieldBegin('type', TType.STRING, 2) oprot.writeString(self.type.encode('utf-8') if sys.version_info[0]", "sys.version_info[0] == 2 else kiter32) oprot.writeString(viter33.encode('utf-8') if sys.version_info[0] == 2 else viter33) oprot.writeMapEnd()", "\"\"\" def __init__(self, success=None, message=None,): self.success = success self.message = message def read(self,", "== TType.I64: self.roleId = iprot.readI64() else: iprot.skip(ftype) elif fid == 3: if ftype", "3 ) all_structs.append(Request) Request.thrift_spec = ( None, # 0 (1, TType.STRUCT, 'http', [HttpRequest,", "self.email is not None: oprot.writeFieldBegin('email', TType.STRING, 6) oprot.writeString(self.email.encode('utf-8') if sys.version_info[0] == 2 else", "TType.I64: self.id = iprot.readI64() else: iprot.skip(ftype) elif fid == 2: if ftype ==", "else iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.data", "status - name - email - points \"\"\" def __init__(self, id=None, roleId=None, categoryId=None,", "2: if ftype == TType.STRING: self.baseUrl = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "is not None: oprot.writeFieldBegin('uriFragments', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.uriFragments)) for kiter32, viter33 in", "class Log(object): \"\"\" Attributes: - level - message \"\"\" def __init__(self, level=None, message=None,):", "all_structs.append(Message) Message.thrift_spec = ( None, # 0 (1, TType.BOOL, 'success', None, None, ),", "_val29 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.parameters[_key28] = _val29", "self.level is not None: oprot.writeFieldBegin('level', TType.STRING, 1) 
oprot.writeString(self.level.encode('utf-8') if sys.version_info[0] == 2 else", "all_structs.append(App) App.thrift_spec = ( None, # 0 (1, TType.I64, 'id', None, None, ),", "self.method = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif", "is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('User') if self.id is not None:", "None, # 0 (1, TType.STRING, 'eventName', 'UTF8', None, ), # 1 (2, TType.STRING,", "self.eventName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif", "None: oprot.writeFieldBegin('app', TType.STRUCT, 3) self.app.write(oprot) oprot.writeFieldEnd() if self.user is not None: oprot.writeFieldBegin('user', TType.STRUCT,", "== TType.STRING: self.body = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else:", "oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return", "= ( None, # 0 (1, TType.I32, 'statusCode', None, None, ), # 1", "TType.STRING, 'code', 'UTF8', None, ), # 2 ) all_structs.append(Execute) Execute.thrift_spec = ( None,", "'headers', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 2 (3, TType.STRING, 'body',", "TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while", "return oprot.writeStructBegin('Connection') if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0]", "errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 7:", "iprot.readString() 
self.headers[_key14] = _val15 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 3: if ftype", "if ftype == TType.STOP: break if fid == 1: if ftype == TType.I32:", "iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRUCT: self.app =", "= app self.user = user def read(self, iprot): if iprot._fast_decode is not None", "oprot.writeBool(self.success) oprot.writeFieldEnd() if self.message is not None: oprot.writeFieldBegin('message', TType.STRING, 2) oprot.writeString(self.message.encode('utf-8') if sys.version_info[0]", "if ftype == TType.STRING: self.url = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "= parameters def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport)", "), # 5 ) all_structs.append(RpcRequest) RpcRequest.thrift_spec = ( None, # 0 (1, TType.STRING,", "for iter49 in self.parameters: oprot.writeString(iter49.encode('utf-8') if sys.version_info[0] == 2 else iter49) oprot.writeListEnd() oprot.writeFieldEnd()", "= context def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport)", "{} (_ktype65, _vtype66, _size64) = iprot.readMapBegin() for _i68 in range(_size64): _key69 = iprot.readString().decode('utf-8',", "TType.I64, 2) oprot.writeI64(self.roleId) oprot.writeFieldEnd() if self.categoryId is not None: oprot.writeFieldBegin('categoryId', TType.I64, 3) oprot.writeI64(self.categoryId)", "YOU KNOW WHAT YOU ARE DOING # # options string: py # from", "self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class User(object):", "in range(_size0): _key5 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val6", "2 else iter48) oprot.writeListEnd() oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.LIST, 8)", "is not None and self.thrift_spec is 
not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('User')", "2 else self.baseUrl) oprot.writeFieldEnd() if self.app is not None: oprot.writeFieldBegin('app', TType.STRUCT, 3) self.app.write(oprot)", "iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.code = iprot.readString().decode('utf-8', errors='replace')", "self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 4) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) for kiter34, viter35", "oprot.writeString(self.code.encode('utf-8') if sys.version_info[0] == 2 else self.code) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return", "else iprot.readString() _val22 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.uriFragments[_key21]", "2 else iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT:", "baseUrl=None, app=None, user=None,): self.routeId = routeId self.baseUrl = baseUrl self.app = app self.user", "(2, TType.STRING, 'type', 'UTF8', None, ), # 2 (3, TType.MAP, 'config', (TType.STRING, 'UTF8',", "[App, None], None, ), # 3 (4, TType.STRUCT, 'user', [User, None], None, ),", "== 2: if ftype == TType.STRUCT: self.rpc = RpcRequest() self.rpc.read(iprot) else: iprot.skip(ftype) else:", "== 2 else self.message) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L", "ftype == TType.MAP: self.uriFragments = {} (_ktype17, _vtype18, _size16) = iprot.readMapBegin() for _i20", "== other) all_structs.append(Message) Message.thrift_spec = ( None, # 0 (1, TType.BOOL, 'success', None,", "- body \"\"\" def __init__(self, method=None, headers=None, uriFragments=None, parameters=None, body=None,): self.method = method", "oprot.writeStructBegin('Message') if self.success is not None: 
oprot.writeFieldBegin('success', TType.BOOL, 1) oprot.writeBool(self.success) oprot.writeFieldEnd() if self.message", "roleId=None, categoryId=None, status=None, name=None, email=None, points=None,): self.id = id self.roleId = roleId self.categoryId", "None, ), # 2 (3, TType.STRING, 'body', 'UTF8', None, ), # 3 )", "0 (1, TType.I64, 'id', None, None, ), # 1 (2, TType.I64, 'roleId', None,", "is not None: oprot.writeFieldBegin('id', TType.I64, 1) oprot.writeI64(self.id) oprot.writeFieldEnd() if self.userId is not None:", "ftype == TType.STRING: self.url = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "def __init__(self, routeId=None, baseUrl=None, app=None, user=None,): self.routeId = routeId self.baseUrl = baseUrl self.app", "TType.STRING, 'action', 'UTF8', None, ), # 1 (2, TType.STRUCT, 'request', [Request, None], None,", "iprot.readString() else: iprot.skip(ftype) elif fid == 7: if ftype == TType.I32: self.points =", "if sys.version_info[0] == 2 else viter31) oprot.writeMapEnd() oprot.writeFieldEnd() if self.uriFragments is not None:", "iprot.readString() _val22 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.uriFragments[_key21] =", "- categoryId - status - name - email - points \"\"\" def __init__(self,", "(_etype39, _size36) = iprot.readListBegin() for _i40 in range(_size36): _elem41 = iprot.readString().decode('utf-8', errors='replace') if", "oprot.writeFieldBegin('status', TType.I32, 4) oprot.writeI32(self.status) oprot.writeFieldEnd() if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 5)", "TType.I32, 4) oprot.writeI32(self.status) oprot.writeFieldEnd() if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 5) oprot.writeString(self.name.encode('utf-8')", "DOING # # options string: py # from thrift.Thrift import TType, TMessageType, TFrozenDict,", "self.logs = [] (_etype59, _size56) = 
iprot.readListBegin() for _i60 in range(_size56): _elem61 =", "other): return not (self == other) class Response(object): \"\"\" Attributes: - statusCode -", "== 1: if ftype == TType.STRING: self.method = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "fid == 3: if ftype == TType.LIST: self.logs = [] (_etype59, _size56) =", "def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other):", "), # 2 (3, TType.MAP, 'config', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ),", "self.headers.items(): oprot.writeString(kiter71.encode('utf-8') if sys.version_info[0] == 2 else kiter71) oprot.writeString(viter72.encode('utf-8') if sys.version_info[0] == 2", "None, None, ), # 2 (3, TType.I64, 'categoryId', None, None, ), # 3", "# 1 (2, TType.STRING, 'data', 'UTF8', None, ), # 2 ) all_structs.append(Log) Log.thrift_spec", "Response(object): \"\"\" Attributes: - statusCode - headers - body \"\"\" def __init__(self, statusCode=None,", "else self.name) oprot.writeFieldEnd() if self.code is not None: oprot.writeFieldBegin('code', TType.STRING, 2) oprot.writeString(self.code.encode('utf-8') if", "class Event(object): \"\"\" Attributes: - eventName - data \"\"\" def __init__(self, eventName=None, data=None,):", "1) oprot.writeString(self.method.encode('utf-8') if sys.version_info[0] == 2 else self.method) oprot.writeFieldEnd() if self.headers is not", "if sys.version_info[0] == 2 else self.code) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def", "categoryId - status - name - email - points \"\"\" def __init__(self, id=None,", "self.logs = logs def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans,", "= App() self.app.read(iprot) else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRUCT:", "( None, # 0 (1, TType.STRING, 'level', 'UTF8', None, ), # 1 (2,", "'UTF8', None, ), # 1 (2, TType.MAP, 
'headers', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False),", "[self.__class__, self.thrift_spec])) return oprot.writeStructBegin('HttpRequest') if self.method is not None: oprot.writeFieldBegin('method', TType.STRING, 1) oprot.writeString(self.method.encode('utf-8')", "elif fid == 2: if ftype == TType.STRING: self.baseUrl = iprot.readString().decode('utf-8', errors='replace') if", "= iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val29 = iprot.readString().decode('utf-8', errors='replace')", "# 0 (1, TType.STRING, 'name', 'UTF8', None, ), # 1 (2, TType.STRING, 'type',", "= method self.headers = headers self.uriFragments = uriFragments self.parameters = parameters self.body =", "'name', 'UTF8', None, ), # 5 (6, TType.STRING, 'email', 'UTF8', None, ), #", "is not None: oprot.writeFieldBegin('method', TType.STRING, 1) oprot.writeString(self.method.encode('utf-8') if sys.version_info[0] == 2 else self.method)", "None: oprot.writeFieldBegin('uriFragments', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.uriFragments)) for kiter32, viter33 in self.uriFragments.items(): oprot.writeString(kiter32.encode('utf-8')", "iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.uriFragments[_key21] = _val22 iprot.readMapEnd() else:", "in self.uriFragments.items(): oprot.writeString(kiter32.encode('utf-8') if sys.version_info[0] == 2 else kiter32) oprot.writeString(viter33.encode('utf-8') if sys.version_info[0] ==", "# 3 (4, TType.MAP, 'parameters', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), #", "not None: oprot.writeFieldBegin('statusCode', TType.I32, 1) oprot.writeI32(self.statusCode) oprot.writeFieldEnd() if self.headers is not None: oprot.writeFieldBegin('headers',", "else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.data = iprot.readString().decode('utf-8',", "# # DO NOT EDIT UNLESS YOU ARE SURE THAT 
YOU KNOW WHAT", "self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class Event(object):", "if sys.version_info[0] == 2 else kiter7) oprot.writeString(viter8.encode('utf-8') if sys.version_info[0] == 2 else viter8)", "None, ), # 2 ) all_structs.append(Log) Log.thrift_spec = ( None, # 0 (1,", "oprot.writeFieldBegin('appKey', TType.STRING, 6) oprot.writeString(self.appKey.encode('utf-8') if sys.version_info[0] == 2 else self.appKey) oprot.writeFieldEnd() if self.scopes", "sys.version_info[0] == 2 else self.code) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self):", "iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.type = iprot.readString().decode('utf-8', errors='replace')", "= iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype", "not None: oprot.writeFieldBegin('parameters', TType.MAP, 4) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) for kiter34, viter35 in self.parameters.items():", "oprot.writeMapEnd() oprot.writeFieldEnd() if self.body is not None: oprot.writeFieldBegin('body', TType.STRING, 5) oprot.writeString(self.body.encode('utf-8') if sys.version_info[0]", "events=None, logs=None,): self.response = response self.events = events self.logs = logs def read(self,", "None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Action') if self.name", "self.uriFragments.items(): oprot.writeString(kiter32.encode('utf-8') if sys.version_info[0] == 2 else kiter32) oprot.writeString(viter33.encode('utf-8') if sys.version_info[0] == 2", "not None: oprot.writeFieldBegin('level', TType.STRING, 1) oprot.writeString(self.level.encode('utf-8') if sys.version_info[0] == 2 else self.level) oprot.writeFieldEnd()", "2) oprot.writeI64(self.userId) oprot.writeFieldEnd() if self.status is not None: oprot.writeFieldBegin('status', 
TType.I32, 3) oprot.writeI32(self.status) oprot.writeFieldEnd()", "self.success = success self.message = message def read(self, iprot): if iprot._fast_decode is not", "None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])", "'method', 'UTF8', None, ), # 1 (2, TType.MAP, 'headers', (TType.STRING, 'UTF8', TType.STRING, 'UTF8',", "TType.STRING, 2) oprot.writeString(self.type.encode('utf-8') if sys.version_info[0] == 2 else self.type) oprot.writeFieldEnd() if self.config is", "# 0 (1, TType.STRING, 'arguments', 'UTF8', None, ), # 1 ) all_structs.append(Context) Context.thrift_spec", "action - request - context \"\"\" def __init__(self, action=None, request=None, context=None,): self.action =", "for _i13 in range(_size9): _key14 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not", "== 2 else self.method) oprot.writeFieldEnd() if self.headers is not None: oprot.writeFieldBegin('headers', TType.MAP, 2)", "sys.version_info[0] == 2 else iprot.readString() _val70 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "6: if ftype == TType.STRING: self.email = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "other): return not (self == other) all_structs.append(Message) Message.thrift_spec = ( None, # 0", "# 1 (2, TType.STRING, 'message', 'UTF8', None, ), # 2 ) fix_spec(all_structs) del", "_vtype11, _size9) = iprot.readMapBegin() for _i13 in range(_size9): _key14 = iprot.readString().decode('utf-8', errors='replace') if", "if self.id is not None: oprot.writeFieldBegin('id', TType.I64, 1) oprot.writeI64(self.id) oprot.writeFieldEnd() if self.userId is", "TType.STRING: self.data = iprot.readString().decode('utf-8', errors='replace') if 
sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype)", "elif fid == 2: if ftype == TType.STRING: self.type = iprot.readString().decode('utf-8', errors='replace') if", "TType.STRING, 1) oprot.writeString(self.level.encode('utf-8') if sys.version_info[0] == 2 else self.level) oprot.writeFieldEnd() if self.message is", "isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin()", "), # 2 ) all_structs.append(HttpRequest) HttpRequest.thrift_spec = ( None, # 0 (1, TType.STRING,", "def __init__(self, eventName=None, data=None,): self.eventName = eventName self.data = data def read(self, iprot):", "self.baseUrl = baseUrl self.app = app self.user = user def read(self, iprot): if", "app=None, user=None,): self.routeId = routeId self.baseUrl = baseUrl self.app = app self.user =", "self.appKey = appKey self.scopes = scopes self.parameters = parameters def read(self, iprot): if", "{} (_ktype1, _vtype2, _size0) = iprot.readMapBegin() for _i4 in range(_size0): _key5 = iprot.readString().decode('utf-8',", "def __init__(self, id=None, roleId=None, categoryId=None, status=None, name=None, email=None, points=None,): self.id = id self.roleId", "None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Message') if self.success", "other.__dict__ def __ne__(self, other): return not (self == other) class Request(object): \"\"\" Attributes:", "ftype == TType.BOOL: self.success = iprot.readBool() else: iprot.skip(ftype) elif fid == 2: if", "other): return not (self == other) class Connection(object): \"\"\" Attributes: - name -", "if self.success is not None: oprot.writeFieldBegin('success', TType.BOOL, 1) oprot.writeBool(self.success) oprot.writeFieldEnd() if self.message is", "other) class Event(object): \"\"\" Attributes: - eventName - data \"\"\" 
def __init__(self, eventName=None,", "ftype == TType.I32: self.points = iprot.readI32() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def", "3 ) all_structs.append(Action) Action.thrift_spec = ( None, # 0 (1, TType.STRING, 'name', 'UTF8',", "oprot.writeFieldBegin('body', TType.STRING, 5) oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] == 2 else self.body) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd()", "user def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and", "len(self.parameters)) for iter49 in self.parameters: oprot.writeString(iter49.encode('utf-8') if sys.version_info[0] == 2 else iter49) oprot.writeListEnd()", "(self == other) class Context(object): \"\"\" Attributes: - routeId - baseUrl - app", "'UTF8', TType.STRING, 'UTF8', False), None, ), # 3 ) all_structs.append(Action) Action.thrift_spec = (", "== 2 else self.action) oprot.writeFieldEnd() if self.request is not None: oprot.writeFieldBegin('request', TType.STRUCT, 2)", "TType.STRUCT, 'user', [User, None], None, ), # 4 ) all_structs.append(App) App.thrift_spec = (", "'userId', None, None, ), # 2 (3, TType.I32, 'status', None, None, ), #", "fid == 4: if ftype == TType.STRUCT: self.user = User() self.user.read(iprot) else: iprot.skip(ftype)", "'http', [HttpRequest, None], None, ), # 1 (2, TType.STRUCT, 'rpc', [RpcRequest, None], None,", "TType.STRUCT, 3) self.app.write(oprot) oprot.writeFieldEnd() if self.user is not None: oprot.writeFieldBegin('user', TType.STRUCT, 4) self.user.write(oprot)", "'categoryId', None, None, ), # 3 (4, TType.I32, 'status', None, None, ), #", "TType.BOOL, 'success', None, None, ), # 1 (2, TType.STRING, 'message', 'UTF8', None, ),", "'UTF8', False), None, ), # 2 (3, TType.MAP, 'uriFragments', (TType.STRING, 'UTF8', TType.STRING, 'UTF8',", "(5, TType.STRING, 'body', 'UTF8', None, ), # 5 ) all_structs.append(RpcRequest) 
RpcRequest.thrift_spec = (", "# 5 (6, TType.STRING, 'email', 'UTF8', None, ), # 6 (7, TType.I32, 'points',", "= name self.code = code def read(self, iprot): if iprot._fast_decode is not None", "in range(_size56): _elem61 = Log() _elem61.read(iprot) self.logs.append(_elem61) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd()", "(TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 4 (5, TType.STRING, 'body', 'UTF8',", ") all_structs.append(Response) Response.thrift_spec = ( None, # 0 (1, TType.I32, 'statusCode', None, None,", "= config def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport)", "== other.__dict__ def __ne__(self, other): return not (self == other) class Connection(object): \"\"\"", "else iprot.readString() self.parameters.append(_elem47) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot):", "2 else kiter34) oprot.writeString(viter35.encode('utf-8') if sys.version_info[0] == 2 else viter35) oprot.writeMapEnd() oprot.writeFieldEnd() if", "[Request, None], None, ), # 2 (3, TType.STRUCT, 'context', [Context, None], None, ),", "if fid == 1: if ftype == TType.STRING: self.level = iprot.readString().decode('utf-8', errors='replace') if", "self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Log') if self.level is not", "2 else self.code) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L =", "TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers)) for kiter30, viter31 in self.headers.items(): oprot.writeString(kiter30.encode('utf-8') if sys.version_info[0]", "is not None: oprot.writeFieldBegin('logs', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.logs)) for iter63 in self.logs: 
iter63.write(oprot)", "__init__(self, id=None, userId=None, status=None, name=None, url=None, appKey=None, scopes=None, parameters=None,): self.id = id self.userId", "1 (2, TType.STRING, 'data', 'UTF8', None, ), # 2 ) all_structs.append(Log) Log.thrift_spec =", "other): return not (self == other) class Request(object): \"\"\" Attributes: - http -", "_val6 iprot.readMapEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode", "if self.body is not None: oprot.writeFieldBegin('body', TType.STRING, 3) oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] == 2", "if sys.version_info[0] == 2 else iprot.readString() _val6 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "ftype == TType.LIST: self.parameters = [] (_etype45, _size42) = iprot.readListBegin() for _i46 in", "None: oprot.writeFieldBegin('points', TType.I32, 7) oprot.writeI32(self.points) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self):", "= _val22 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.MAP:", "5) oprot.writeString(self.url.encode('utf-8') if sys.version_info[0] == 2 else self.url) oprot.writeFieldEnd() if self.appKey is not", "= iprot.readListBegin() for _i46 in range(_size42): _elem47 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "== 1: if ftype == TType.I32: self.statusCode = iprot.readI32() else: iprot.skip(ftype) elif fid", "None, ), # 7 ) all_structs.append(Result) Result.thrift_spec = ( None, # 0 (1,", "TFrozenDict, TException, TApplicationException from thrift.protocol.TProtocol import TProtocolException from thrift.TRecursive import fix_spec import sys", "name - code \"\"\" def __init__(self, name=None, code=None,): self.name = name self.code =", "other): return not (self == other) class Event(object): \"\"\" Attributes: - eventName 
-", "is not None: oprot.writeFieldBegin('request', TType.STRUCT, 2) self.request.write(oprot) oprot.writeFieldEnd() if self.context is not None:", "_key5 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val6 = iprot.readString().decode('utf-8',", "oprot.writeString(kiter34.encode('utf-8') if sys.version_info[0] == 2 else kiter34) oprot.writeString(viter35.encode('utf-8') if sys.version_info[0] == 2 else", "else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not None and", "break if fid == 1: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8', errors='replace')", "TType.I64, 1) oprot.writeI64(self.routeId) oprot.writeFieldEnd() if self.baseUrl is not None: oprot.writeFieldBegin('baseUrl', TType.STRING, 2) oprot.writeString(self.baseUrl.encode('utf-8')", "in range(_size42): _elem47 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.parameters.append(_elem47)", "ftype == TType.STRING: self.body = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "== other.__dict__ def __ne__(self, other): return not (self == other) class Action(object): \"\"\"", ") all_structs.append(Context) Context.thrift_spec = ( None, # 0 (1, TType.I64, 'routeId', None, None,", "== 2 else iprot.readString() _val70 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "[self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Connection') if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8')", "TType.I64, 'routeId', None, None, ), # 1 (2, TType.STRING, 'baseUrl', 'UTF8', None, ),", "None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Connection') if self.name is not 
None: oprot.writeFieldBegin('name', TType.STRING,", "ftype == TType.STOP: break if fid == 1: if ftype == TType.I32: self.statusCode", "Attributes: - eventName - data \"\"\" def __init__(self, eventName=None, data=None,): self.eventName = eventName", "8 ) all_structs.append(User) User.thrift_spec = ( None, # 0 (1, TType.I64, 'id', None,", "self.userId = iprot.readI64() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.I32:", "2: if ftype == TType.STRING: self.type = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "1) oprot.writeI64(self.id) oprot.writeFieldEnd() if self.userId is not None: oprot.writeFieldBegin('userId', TType.I64, 2) oprot.writeI64(self.userId) oprot.writeFieldEnd()", "response - events - logs \"\"\" def __init__(self, response=None, events=None, logs=None,): self.response =", "_val70 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.headers[_key69] = _val70", "not (self == other) class RpcRequest(object): \"\"\" Attributes: - arguments \"\"\" def __init__(self,", "'events', (TType.STRUCT, [Event, None], False), None, ), # 2 (3, TType.LIST, 'logs', (TType.STRUCT,", "== 1: if ftype == TType.I64: self.id = iprot.readI64() else: iprot.skip(ftype) elif fid", "ftype == TType.STRUCT: self.response = Response() self.response.read(iprot) else: iprot.skip(ftype) elif fid == 2:", "), # 7 (8, TType.LIST, 'parameters', (TType.STRING, 'UTF8', False), None, ), # 8", "__ne__(self, other): return not (self == other) class HttpRequest(object): \"\"\" Attributes: - method", "for _i4 in range(_size0): _key5 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "(TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 2 (3, TType.MAP, 'uriFragments', (TType.STRING,", "== 3: if ftype == TType.I64: self.categoryId = iprot.readI64() else: iprot.skip(ftype) elif fid", "'url', 'UTF8', None, ), # 5 (6, TType.STRING, 'appKey', 
'UTF8', None, ), #", "(_ktype65, _vtype66, _size64) = iprot.readMapBegin() for _i68 in range(_size64): _key69 = iprot.readString().decode('utf-8', errors='replace')", "if sys.version_info[0] == 2 else iprot.readString() self.parameters[_key28] = _val29 iprot.readMapEnd() else: iprot.skip(ftype) elif", "[Response, None], None, ), # 1 (2, TType.LIST, 'events', (TType.STRUCT, [Event, None], False),", "errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val15 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "== 2 else self.name) oprot.writeFieldEnd() if self.type is not None: oprot.writeFieldBegin('type', TType.STRING, 2)", "len(self.parameters)) for kiter34, viter35 in self.parameters.items(): oprot.writeString(kiter34.encode('utf-8') if sys.version_info[0] == 2 else kiter34)", "= eventName self.data = data def read(self, iprot): if iprot._fast_decode is not None", "None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if", "== 4: if ftype == TType.I32: self.status = iprot.readI32() else: iprot.skip(ftype) elif fid", "None], False), None, ), # 2 (3, TType.LIST, 'logs', (TType.STRUCT, [Log, None], False),", "is not None: oprot.writeFieldBegin('name', TType.STRING, 5) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)", "2 else iprot.readString() self.parameters.append(_elem47) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self,", "def __init__(self, action=None, request=None, context=None,): self.action = action self.request = request self.context =", "(4, TType.STRUCT, 'user', [User, None], None, ), # 4 ) all_structs.append(App) App.thrift_spec =", "not None: oprot.writeFieldBegin('method', TType.STRING, 1) oprot.writeString(self.method.encode('utf-8') if sys.version_info[0] == 2 else self.method) 
oprot.writeFieldEnd()", "- id - roleId - categoryId - status - name - email -", "self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Message') if self.success is not", "iprot.skip(ftype) elif fid == 3: if ftype == TType.I32: self.status = iprot.readI32() else:", "= id self.roleId = roleId self.categoryId = categoryId self.status = status self.name =", "roleId self.categoryId = categoryId self.status = status self.name = name self.email = email", "= {} (_ktype17, _vtype18, _size16) = iprot.readMapBegin() for _i20 in range(_size16): _key21 =", "else iprot.readString() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.STRING: self.email", "oprot.writeFieldBegin('url', TType.STRING, 5) oprot.writeString(self.url.encode('utf-8') if sys.version_info[0] == 2 else self.url) oprot.writeFieldEnd() if self.appKey", "'body', 'UTF8', None, ), # 3 ) all_structs.append(Event) Event.thrift_spec = ( None, #", "oprot.writeFieldBegin('categoryId', TType.I64, 3) oprot.writeI64(self.categoryId) oprot.writeFieldEnd() if self.status is not None: oprot.writeFieldBegin('status', TType.I32, 4)", "oprot.writeFieldBegin('success', TType.BOOL, 1) oprot.writeBool(self.success) oprot.writeFieldEnd() if self.message is not None: oprot.writeFieldBegin('message', TType.STRING, 2)", "== 2 else self.baseUrl) oprot.writeFieldEnd() if self.app is not None: oprot.writeFieldBegin('app', TType.STRUCT, 3)", "oprot.writeStructBegin('Event') if self.eventName is not None: oprot.writeFieldBegin('eventName', TType.STRING, 1) oprot.writeString(self.eventName.encode('utf-8') if sys.version_info[0] ==", "== TType.STRING: self.email = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else:", "oprot.writeStructBegin('Log') if self.level is not None: oprot.writeFieldBegin('level', TType.STRING, 1) oprot.writeString(self.level.encode('utf-8') if sys.version_info[0] 
==", "_val22 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.uriFragments[_key21] = _val22", "all_structs.append(Log) Log.thrift_spec = ( None, # 0 (1, TType.STRING, 'level', 'UTF8', None, ),", "\"\"\" Attributes: - method - headers - uriFragments - parameters - body \"\"\"", "return not (self == other) class Event(object): \"\"\" Attributes: - eventName - data", "(3, TType.STRUCT, 'context', [Context, None], None, ), # 3 ) all_structs.append(Request) Request.thrift_spec =", "(_etype45, _size42) = iprot.readListBegin() for _i46 in range(_size42): _elem47 = iprot.readString().decode('utf-8', errors='replace') if", "None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Message') if self.success is not None: oprot.writeFieldBegin('success', TType.BOOL,", "name=None, email=None, points=None,): self.id = id self.roleId = roleId self.categoryId = categoryId self.status", "oprot.writeFieldBegin('http', TType.STRUCT, 1) self.http.write(oprot) oprot.writeFieldEnd() if self.rpc is not None: oprot.writeFieldBegin('rpc', TType.STRUCT, 2)", "iprot.readString() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: self.url =", "(2, TType.MAP, 'headers', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 2 (3,", "not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Connection') if", "def __ne__(self, other): return not (self == other) class Log(object): \"\"\" Attributes: -", "Attributes: - statusCode - headers - body \"\"\" def __init__(self, statusCode=None, headers=None, body=None,):", "name - url - appKey - scopes - parameters \"\"\" def __init__(self, id=None,", "and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('App') if self.id is", 
"is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Action') if self.name is not None:", "iprot.readI64() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.I32: self.status =", "return oprot.writeStructBegin('RpcRequest') if self.arguments is not None: oprot.writeFieldBegin('arguments', TType.STRING, 1) oprot.writeString(self.arguments.encode('utf-8') if sys.version_info[0]", "else self.baseUrl) oprot.writeFieldEnd() if self.app is not None: oprot.writeFieldBegin('app', TType.STRUCT, 3) self.app.write(oprot) oprot.writeFieldEnd()", "else kiter71) oprot.writeString(viter72.encode('utf-8') if sys.version_info[0] == 2 else viter72) oprot.writeMapEnd() oprot.writeFieldEnd() if self.body", "if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 6: if", "not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Action') if", "data def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and", "# from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException from thrift.protocol.TProtocol import TProtocolException", "'status', None, None, ), # 4 (5, TType.STRING, 'name', 'UTF8', None, ), #", "if sys.version_info[0] == 2 else iprot.readString() self.headers[_key69] = _val70 iprot.readMapEnd() else: iprot.skip(ftype) elif", "None, ), # 4 ) all_structs.append(App) App.thrift_spec = ( None, # 0 (1,", "if ftype == TType.MAP: self.headers = {} (_ktype10, _vtype11, _size9) = iprot.readMapBegin() for", "), # 1 (2, TType.STRING, 'baseUrl', 'UTF8', None, ), # 2 (3, TType.STRUCT,", "[] (_etype53, _size50) = iprot.readListBegin() for _i54 in range(_size50): _elem55 = Event() _elem55.read(iprot)", "(TType.STRUCT, [Log, None], False), None, ), # 3 ) all_structs.append(Response) 
Response.thrift_spec = (", "ftype == TType.MAP: self.config = {} (_ktype1, _vtype2, _size0) = iprot.readMapBegin() for _i4", "== 2: if ftype == TType.MAP: self.headers = {} (_ktype65, _vtype66, _size64) =", "None, ), # 5 (6, TType.STRING, 'appKey', 'UTF8', None, ), # 6 (7,", "- headers - uriFragments - parameters - body \"\"\" def __init__(self, method=None, headers=None,", "Event(object): \"\"\" Attributes: - eventName - data \"\"\" def __init__(self, eventName=None, data=None,): self.eventName", "else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.code = iprot.readString().decode('utf-8',", "[self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Request') if self.http is not None: oprot.writeFieldBegin('http', TType.STRUCT, 1) self.http.write(oprot)", "iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.logs = [] (_etype59,", "self.level = level self.message = message def read(self, iprot): if iprot._fast_decode is not", "_val15 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.MAP: self.uriFragments", "0 (1, TType.STRING, 'arguments', 'UTF8', None, ), # 1 ) all_structs.append(Context) Context.thrift_spec =", "is not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)", "is not None: oprot.writeFieldBegin('body', TType.STRING, 3) oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] == 2 else self.body)", "not None: oprot.writeFieldBegin('arguments', TType.STRING, 1) oprot.writeString(self.arguments.encode('utf-8') if sys.version_info[0] == 2 else self.arguments) oprot.writeFieldEnd()", "body \"\"\" def __init__(self, method=None, headers=None, uriFragments=None, parameters=None, body=None,): self.method = method self.headers", "1 (2, TType.I64, 'userId', None, None, ), # 2 (3, TType.I32, 'status', None,", "None, None, ), # 2 (3, TType.I32, 'status', None, None, ), # 3", "TType.STRING, 'UTF8', 
False), None, ), # 3 ) all_structs.append(Action) Action.thrift_spec = ( None,", "def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for key,", "Action(object): \"\"\" Attributes: - name - code \"\"\" def __init__(self, name=None, code=None,): self.name", "points \"\"\" def __init__(self, id=None, roleId=None, categoryId=None, status=None, name=None, email=None, points=None,): self.id =", "is not None: oprot.writeFieldBegin('context', TType.STRUCT, 3) self.context.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return", "sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 7: if ftype", "iprot.readListEnd() else: iprot.skip(ftype) elif fid == 8: if ftype == TType.LIST: self.parameters =", "(1, TType.STRING, 'arguments', 'UTF8', None, ), # 1 ) all_structs.append(Context) Context.thrift_spec = (", "== other) class Response(object): \"\"\" Attributes: - statusCode - headers - body \"\"\"", "'UTF8', None, ), # 4 (5, TType.STRING, 'url', 'UTF8', None, ), # 5", "return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and", "= iprot.readI64() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.baseUrl", "), # 4 (5, TType.STRING, 'body', 'UTF8', None, ), # 5 ) all_structs.append(RpcRequest)", "if ftype == TType.STRING: self.method = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "sys.version_info[0] == 2 else iprot.readString() self.headers[_key69] = _val70 iprot.readMapEnd() else: iprot.skip(ftype) elif fid", "self.scopes.append(_elem41) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 8: if ftype == TType.LIST: self.parameters", "None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Context') if self.routeId is not None: oprot.writeFieldBegin('routeId', TType.I64,", 
"rpc=None,): self.http = http self.rpc = rpc def read(self, iprot): if iprot._fast_decode is", "), # 5 (6, TType.STRING, 'appKey', 'UTF8', None, ), # 6 (7, TType.LIST,", "fix_spec import sys from thrift.transport import TTransport all_structs = [] class Message(object): \"\"\"", "2 else self.level) oprot.writeFieldEnd() if self.message is not None: oprot.writeFieldBegin('message', TType.STRING, 2) oprot.writeString(self.message.encode('utf-8')", "TType.BOOL, 1) oprot.writeBool(self.success) oprot.writeFieldEnd() if self.message is not None: oprot.writeFieldBegin('message', TType.STRING, 2) oprot.writeString(self.message.encode('utf-8')", "iprot.skip(ftype) elif fid == 2: if ftype == TType.I64: self.userId = iprot.readI64() else:", "if ftype == TType.STRING: self.action = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "== 5: if ftype == TType.STRING: self.body = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "# 0 (1, TType.I64, 'id', None, None, ), # 1 (2, TType.I64, 'roleId',", "success self.message = message def read(self, iprot): if iprot._fast_decode is not None and", "(5, TType.STRING, 'name', 'UTF8', None, ), # 5 (6, TType.STRING, 'email', 'UTF8', None,", "in self.headers.items(): oprot.writeString(kiter30.encode('utf-8') if sys.version_info[0] == 2 else kiter30) oprot.writeString(viter31.encode('utf-8') if sys.version_info[0] ==", "in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other,", "is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Result')", "iprot.skip(ftype) elif fid == 2: if ftype == TType.MAP: self.headers = {} (_ktype10,", "None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Request') if self.http is not None: 
oprot.writeFieldBegin('http', TType.STRUCT,", "iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not", "viter35 in self.parameters.items(): oprot.writeString(kiter34.encode('utf-8') if sys.version_info[0] == 2 else kiter34) oprot.writeString(viter35.encode('utf-8') if sys.version_info[0]", "else self.email) oprot.writeFieldEnd() if self.points is not None: oprot.writeFieldBegin('points', TType.I32, 7) oprot.writeI32(self.points) oprot.writeFieldEnd()", "TType.I32, 'statusCode', None, None, ), # 1 (2, TType.MAP, 'headers', (TType.STRING, 'UTF8', TType.STRING,", "TType.STRUCT: self.http = HttpRequest() self.http.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype", "self.method = method self.headers = headers self.uriFragments = uriFragments self.parameters = parameters self.body", "sys.version_info[0] == 2 else self.arguments) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self):", "2 ) all_structs.append(Connection) Connection.thrift_spec = ( None, # 0 (1, TType.STRING, 'name', 'UTF8',", "self.baseUrl = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif", "'UTF8', False), None, ), # 4 (5, TType.STRING, 'body', 'UTF8', None, ), #", "TType.STRING: self.method = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype)", "self.method is not None: oprot.writeFieldBegin('method', TType.STRING, 1) oprot.writeString(self.method.encode('utf-8') if sys.version_info[0] == 2 else", "== other.__dict__ def __ne__(self, other): return not (self == other) class Result(object): \"\"\"", "arguments def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and", "is not None and self.thrift_spec is not None: 
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('HttpRequest')", "other) class Connection(object): \"\"\" Attributes: - name - type - config \"\"\" def", "if ftype == TType.LIST: self.parameters = [] (_etype45, _size42) = iprot.readListBegin() for _i46", "None: oprot.writeFieldBegin('parameters', TType.LIST, 8) oprot.writeListBegin(TType.STRING, len(self.parameters)) for iter49 in self.parameters: oprot.writeString(iter49.encode('utf-8') if sys.version_info[0]", "None: oprot.writeFieldBegin('id', TType.I64, 1) oprot.writeI64(self.id) oprot.writeFieldEnd() if self.roleId is not None: oprot.writeFieldBegin('roleId', TType.I64,", "== other.__dict__ def __ne__(self, other): return not (self == other) class Request(object): \"\"\"", "fid == 4: if ftype == TType.MAP: self.parameters = {} (_ktype24, _vtype25, _size23)", "else: iprot.skip(ftype) elif fid == 6: if ftype == TType.STRING: self.email = iprot.readString().decode('utf-8',", "'success', None, None, ), # 1 (2, TType.STRING, 'message', 'UTF8', None, ), #", "name self.url = url self.appKey = appKey self.scopes = scopes self.parameters = parameters", "1) oprot.writeString(self.action.encode('utf-8') if sys.version_info[0] == 2 else self.action) oprot.writeFieldEnd() if self.request is not", "sys.version_info[0] == 2 else viter31) oprot.writeMapEnd() oprot.writeFieldEnd() if self.uriFragments is not None: oprot.writeFieldBegin('uriFragments',", "sys.version_info[0] == 2 else self.appKey) oprot.writeFieldEnd() if self.scopes is not None: oprot.writeFieldBegin('scopes', TType.LIST,", "oprot.writeFieldEnd() if self.scopes is not None: oprot.writeFieldBegin('scopes', TType.LIST, 7) oprot.writeListBegin(TType.STRING, len(self.scopes)) for iter48", "oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('RpcRequest') if self.arguments is not None: oprot.writeFieldBegin('arguments', TType.STRING, 1)", "else: 
iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.events = []", "is not None: oprot.writeFieldBegin('arguments', TType.STRING, 1) oprot.writeString(self.arguments.encode('utf-8') if sys.version_info[0] == 2 else self.arguments)", "_elem61 = Log() _elem61.read(iprot) self.logs.append(_elem61) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def", "(3, TType.MAP, 'uriFragments', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3 (4,", "iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.MAP: self.headers =", "name=None, type=None, config=None,): self.name = name self.type = type self.config = config def", "\"\"\" Attributes: - success - message \"\"\" def __init__(self, success=None, message=None,): self.success =", "# Autogenerated by Thrift Compiler (0.14.2) # # DO NOT EDIT UNLESS YOU", "# 5 (6, TType.STRING, 'appKey', 'UTF8', None, ), # 6 (7, TType.LIST, 'scopes',", "if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 4) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) for kiter34,", "user=None,): self.routeId = routeId self.baseUrl = baseUrl self.app = app self.user = user", "# 1 (2, TType.MAP, 'headers', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), #", "fid == 7: if ftype == TType.I32: self.points = iprot.readI32() else: iprot.skip(ftype) else:", "and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(Message)", "(2, TType.STRUCT, 'request', [Request, None], None, ), # 2 (3, TType.STRUCT, 'context', [Context,", "== TType.STRING: self.data = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else:", "oprot.writeFieldBegin('points', TType.I32, 7) oprot.writeI32(self.points) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def 
__repr__(self): L", "self.status = iprot.readI32() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING:", "None, None, ), # 3 (4, TType.STRING, 'name', 'UTF8', None, ), # 4", "== TType.LIST: self.scopes = [] (_etype39, _size36) = iprot.readListBegin() for _i40 in range(_size36):", "None: oprot.writeFieldBegin('level', TType.STRING, 1) oprot.writeString(self.level.encode('utf-8') if sys.version_info[0] == 2 else self.level) oprot.writeFieldEnd() if", "TType.STRUCT: self.context = Context() self.context.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self,", "is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Message')", "None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('App') if self.id is not None: oprot.writeFieldBegin('id', TType.I64,", "self.request = request self.context = context def read(self, iprot): if iprot._fast_decode is not", "ftype == TType.STRUCT: self.app = App() self.app.read(iprot) else: iprot.skip(ftype) elif fid == 4:", "oprot.writeString(viter33.encode('utf-8') if sys.version_info[0] == 2 else viter33) oprot.writeMapEnd() oprot.writeFieldEnd() if self.parameters is not", "parameters - body \"\"\" def __init__(self, method=None, headers=None, uriFragments=None, parameters=None, body=None,): self.method =", "if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:", "oprot.writeFieldBegin('message', TType.STRING, 2) oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd()", "self.headers = {} (_ktype65, _vtype66, _size64) = iprot.readMapBegin() for _i68 in range(_size64): _key69", "other) class Context(object): \"\"\" Attributes: 
- routeId - baseUrl - app - user", "not (self == other) class User(object): \"\"\" Attributes: - id - roleId -", "None: oprot.writeFieldBegin('headers', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers)) for kiter71, viter72 in self.headers.items(): oprot.writeString(kiter71.encode('utf-8')", "TType.STRUCT: self.request = Request() self.request.read(iprot) else: iprot.skip(ftype) elif fid == 3: if ftype", "viter8) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r'", "else viter8) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L =", "Compiler (0.14.2) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU", "and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Log') if self.level is", "not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd()", "oprot.writeString(kiter71.encode('utf-8') if sys.version_info[0] == 2 else kiter71) oprot.writeString(viter72.encode('utf-8') if sys.version_info[0] == 2 else", "oprot.writeString(self.url.encode('utf-8') if sys.version_info[0] == 2 else self.url) oprot.writeFieldEnd() if self.appKey is not None:", "(TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3 (4, TType.MAP, 'parameters', (TType.STRING,", "oprot.writeString(self.method.encode('utf-8') if sys.version_info[0] == 2 else self.method) oprot.writeFieldEnd() if self.headers is not None:", "== TType.STRING: self.arguments = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else:", "oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.email is not 
None:", "other.__dict__ def __ne__(self, other): return not (self == other) class Connection(object): \"\"\" Attributes:", "sys.version_info[0] == 2 else viter33) oprot.writeMapEnd() oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters',", "oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.code", "\"\"\" def __init__(self, routeId=None, baseUrl=None, app=None, user=None,): self.routeId = routeId self.baseUrl = baseUrl", "self.thrift_spec])) return oprot.writeStructBegin('Message') if self.success is not None: oprot.writeFieldBegin('success', TType.BOOL, 1) oprot.writeBool(self.success) oprot.writeFieldEnd()", "else self.body) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r'", "2: if ftype == TType.LIST: self.events = [] (_etype53, _size50) = iprot.readListBegin() for", "== 2 else self.eventName) oprot.writeFieldEnd() if self.data is not None: oprot.writeFieldBegin('data', TType.STRING, 2)", "None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Context') if self.routeId", "# 2 ) all_structs.append(Log) Log.thrift_spec = ( None, # 0 (1, TType.STRING, 'level',", "oprot.writeI32(self.status) oprot.writeFieldEnd() if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 5) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0]", "TType.MAP: self.headers = {} (_ktype10, _vtype11, _size9) = iprot.readMapBegin() for _i13 in range(_size9):", "oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value)", "not None: oprot.writeFieldBegin('response', TType.STRUCT, 1) self.response.write(oprot) oprot.writeFieldEnd() if self.events is not None: 
oprot.writeFieldBegin('events',", "ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # # options string:", "0 (1, TType.I64, 'routeId', None, None, ), # 1 (2, TType.STRING, 'baseUrl', 'UTF8',", "self.status = status self.name = name self.email = email self.points = points def", "self.response.write(oprot) oprot.writeFieldEnd() if self.events is not None: oprot.writeFieldBegin('events', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.events)) for", "oprot.writeI32(self.status) oprot.writeFieldEnd() if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 4) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0]", "self.baseUrl) oprot.writeFieldEnd() if self.app is not None: oprot.writeFieldBegin('app', TType.STRUCT, 3) self.app.write(oprot) oprot.writeFieldEnd() if", "oprot.writeFieldBegin('arguments', TType.STRING, 1) oprot.writeString(self.arguments.encode('utf-8') if sys.version_info[0] == 2 else self.arguments) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd()", "iprot.readString() self.uriFragments[_key21] = _val22 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 4: if ftype", "== TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.response =", "is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('RpcRequest')", "eventName=None, data=None,): self.eventName = eventName self.data = data def read(self, iprot): if iprot._fast_decode", "for _i60 in range(_size56): _elem61 = Log() _elem61.read(iprot) self.logs.append(_elem61) iprot.readListEnd() else: iprot.skip(ftype) else:", "for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self,", "len(self.headers)) for kiter30, viter31 in self.headers.items(): oprot.writeString(kiter30.encode('utf-8') if sys.version_info[0] == 2 else kiter30)", "not None and self.thrift_spec is not None: 
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Context') if", "- response - events - logs \"\"\" def __init__(self, response=None, events=None, logs=None,): self.response", ") all_structs.append(Log) Log.thrift_spec = ( None, # 0 (1, TType.STRING, 'level', 'UTF8', None,", "None, None, ), # 4 (5, TType.STRING, 'name', 'UTF8', None, ), # 5", "oprot.writeMapEnd() oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 4) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters))", "2: if ftype == TType.I64: self.roleId = iprot.readI64() else: iprot.skip(ftype) elif fid ==", "else self.type) oprot.writeFieldEnd() if self.config is not None: oprot.writeFieldBegin('config', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING,", "and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Execute') if self.action is", "= uriFragments self.parameters = parameters self.body = body def read(self, iprot): if iprot._fast_decode", "else iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.request", "if self.config is not None: oprot.writeFieldBegin('config', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.config)) for kiter7,", "headers=None, uriFragments=None, parameters=None, body=None,): self.method = method self.headers = headers self.uriFragments = uriFragments", "5: if ftype == TType.STRING: self.body = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "Connection.thrift_spec = ( None, # 0 (1, TType.STRING, 'name', 'UTF8', None, ), #", "[Context, None], None, ), # 3 ) all_structs.append(Request) Request.thrift_spec = ( None, #", "= baseUrl self.app = app self.user = user def read(self, iprot): if iprot._fast_decode", "THAT YOU KNOW WHAT YOU ARE DOING # # options string: 
py #", "request self.context = context def read(self, iprot): if iprot._fast_decode is not None and", "len(self.uriFragments)) for kiter32, viter33 in self.uriFragments.items(): oprot.writeString(kiter32.encode('utf-8') if sys.version_info[0] == 2 else kiter32)", "iter48) oprot.writeListEnd() oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.LIST, 8) oprot.writeListBegin(TType.STRING, len(self.parameters))", "3: if ftype == TType.STRING: self.body = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "None: oprot.writeFieldBegin('statusCode', TType.I32, 1) oprot.writeI32(self.statusCode) oprot.writeFieldEnd() if self.headers is not None: oprot.writeFieldBegin('headers', TType.MAP,", "if self.uriFragments is not None: oprot.writeFieldBegin('uriFragments', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.uriFragments)) for kiter32,", "2 else iter49) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L", "= type self.config = config def read(self, iprot): if iprot._fast_decode is not None", "sys.version_info[0] == 2 else self.email) oprot.writeFieldEnd() if self.points is not None: oprot.writeFieldBegin('points', TType.I32,", "else: iprot.skip(ftype) elif fid == 6: if ftype == TType.STRING: self.appKey = iprot.readString().decode('utf-8',", "2: if ftype == TType.MAP: self.headers = {} (_ktype65, _vtype66, _size64) = iprot.readMapBegin()", "errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.scopes.append(_elem41) iprot.readListEnd() else: iprot.skip(ftype) elif fid", "viter72) oprot.writeMapEnd() oprot.writeFieldEnd() if self.body is not None: oprot.writeFieldBegin('body', TType.STRING, 3) oprot.writeString(self.body.encode('utf-8') if", "_i40 in range(_size36): _elem41 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else 
iprot.readString()", "ftype == TType.STOP: break if fid == 1: if ftype == TType.I64: self.id", "== 2: if ftype == TType.STRING: self.baseUrl = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "== 1: if ftype == TType.STRING: self.arguments = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "class Result(object): \"\"\" Attributes: - response - events - logs \"\"\" def __init__(self,", "None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Event') if self.eventName is not None: oprot.writeFieldBegin('eventName', TType.STRING,", "class Response(object): \"\"\" Attributes: - statusCode - headers - body \"\"\" def __init__(self,", "1: if ftype == TType.BOOL: self.success = iprot.readBool() else: iprot.skip(ftype) elif fid ==", "(self == other) all_structs.append(Message) Message.thrift_spec = ( None, # 0 (1, TType.BOOL, 'success',", "len(self.config)) for kiter7, viter8 in self.config.items(): oprot.writeString(kiter7.encode('utf-8') if sys.version_info[0] == 2 else kiter7)", "iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.rpc = RpcRequest() self.rpc.read(iprot)", "elif fid == 3: if ftype == TType.I64: self.categoryId = iprot.readI64() else: iprot.skip(ftype)", "import TType, TMessageType, TFrozenDict, TException, TApplicationException from thrift.protocol.TProtocol import TProtocolException from thrift.TRecursive import", "self.parameters = [] (_etype45, _size42) = iprot.readListBegin() for _i46 in range(_size42): _elem47 =", "# 0 (1, TType.STRING, 'eventName', 'UTF8', None, ), # 1 (2, TType.STRING, 'data',", "sys.version_info[0] == 2 else iprot.readString() self.headers[_key14] = _val15 iprot.readMapEnd() else: iprot.skip(ftype) elif fid", "[HttpRequest, None], None, ), # 1 (2, TType.STRUCT, 'rpc', [RpcRequest, None], None, ),", "== 2 else iter49) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() 
oprot.writeStructEnd() def validate(self): return def __repr__(self):", "Log() _elem61.read(iprot) self.logs.append(_elem61) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot):", "(1, TType.I64, 'routeId', None, None, ), # 1 (2, TType.STRING, 'baseUrl', 'UTF8', None,", "if ftype == TType.MAP: self.parameters = {} (_ktype24, _vtype25, _size23) = iprot.readMapBegin() for", "EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING #", "\"\"\" def __init__(self, arguments=None,): self.arguments = arguments def read(self, iprot): if iprot._fast_decode is", "None: oprot.writeFieldBegin('logs', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.logs)) for iter63 in self.logs: iter63.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd()", "if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.type is not None: oprot.writeFieldBegin('type',", "oprot.writeFieldBegin('userId', TType.I64, 2) oprot.writeI64(self.userId) oprot.writeFieldEnd() if self.status is not None: oprot.writeFieldBegin('status', TType.I32, 3)", "other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(Message) Message.thrift_spec = (", "elif fid == 3: if ftype == TType.STRUCT: self.context = Context() self.context.read(iprot) else:", "for _i27 in range(_size23): _key28 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "if ftype == TType.I64: self.id = iprot.readI64() else: iprot.skip(ftype) elif fid == 2:", "), # 3 (4, TType.STRUCT, 'user', [User, None], None, ), # 4 )", "== other.__dict__ def __ne__(self, other): return not (self == other) class App(object): \"\"\"", "self.appKey) oprot.writeFieldEnd() if self.scopes is not None: oprot.writeFieldBegin('scopes', TType.LIST, 7) oprot.writeListBegin(TType.STRING, len(self.scopes)) for", "def __ne__(self, other): return not (self == other) class RpcRequest(object): \"\"\" 
Attributes: -", "# 3 (4, TType.STRUCT, 'user', [User, None], None, ), # 4 ) all_structs.append(App)", "KNOW WHAT YOU ARE DOING # # options string: py # from thrift.Thrift", "else iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.type", "else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.baseUrl = iprot.readString().decode('utf-8',", "is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('App')", "elif fid == 2: if ftype == TType.I64: self.userId = iprot.readI64() else: iprot.skip(ftype)", "iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.headers[_key14] = _val15 iprot.readMapEnd() else:", "iprot.readI64() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.baseUrl =", "None: oprot.writeFieldBegin('url', TType.STRING, 5) oprot.writeString(self.url.encode('utf-8') if sys.version_info[0] == 2 else self.url) oprot.writeFieldEnd() if", "else iprot.readString() _val15 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.headers[_key14]", "- statusCode - headers - body \"\"\" def __init__(self, statusCode=None, headers=None, body=None,): self.statusCode", "code \"\"\" def __init__(self, name=None, code=None,): self.name = name self.code = code def", "None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Event') if self.eventName", "None: oprot.writeFieldBegin('request', TType.STRUCT, 2) self.request.write(oprot) oprot.writeFieldEnd() if self.context is not None: oprot.writeFieldBegin('context', TType.STRUCT,", "(1, TType.I32, 'statusCode', None, None, ), # 1 (2, TType.MAP, 'headers', (TType.STRING, 'UTF8',", "fid == 2: if ftype == TType.STRUCT: self.rpc = RpcRequest() 
self.rpc.read(iprot) else: iprot.skip(ftype)", "== other.__dict__ def __ne__(self, other): return not (self == other) class RpcRequest(object): \"\"\"", "= statusCode self.headers = headers self.body = body def read(self, iprot): if iprot._fast_decode", "[self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Execute') if self.action is not None: oprot.writeFieldBegin('action', TType.STRING, 1) oprot.writeString(self.action.encode('utf-8')", "errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 5:", "email=None, points=None,): self.id = id self.roleId = roleId self.categoryId = categoryId self.status =", "not None: oprot.writeFieldBegin('user', TType.STRUCT, 4) self.user.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def", "not (self == other) class Log(object): \"\"\" Attributes: - level - message \"\"\"", "parameters \"\"\" def __init__(self, id=None, userId=None, status=None, name=None, url=None, appKey=None, scopes=None, parameters=None,): self.id", "'config', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3 ) all_structs.append(Action) Action.thrift_spec", "== 2 else iter48) oprot.writeListEnd() oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.LIST,", "fid == 4: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "else kiter34) oprot.writeString(viter35.encode('utf-8') if sys.version_info[0] == 2 else viter35) oprot.writeMapEnd() oprot.writeFieldEnd() if self.body", "from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException from thrift.protocol.TProtocol import TProtocolException from", "iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8', errors='replace')", "break if fid == 1: if ftype == TType.STRING: self.level = 
iprot.readString().decode('utf-8', errors='replace')", "= ( None, # 0 (1, TType.I64, 'routeId', None, None, ), # 1", "), # 2 ) all_structs.append(Execute) Execute.thrift_spec = ( None, # 0 (1, TType.STRING,", "in range(_size9): _key14 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val15", "statusCode=None, headers=None, body=None,): self.statusCode = statusCode self.headers = headers self.body = body def", "None: oprot.writeFieldBegin('status', TType.I32, 4) oprot.writeI32(self.status) oprot.writeFieldEnd() if self.name is not None: oprot.writeFieldBegin('name', TType.STRING,", "not None: oprot.writeFieldBegin('type', TType.STRING, 2) oprot.writeString(self.type.encode('utf-8') if sys.version_info[0] == 2 else self.type) oprot.writeFieldEnd()", "None: oprot.writeFieldBegin('data', TType.STRING, 2) oprot.writeString(self.data.encode('utf-8') if sys.version_info[0] == 2 else self.data) oprot.writeFieldEnd() oprot.writeFieldStop()", "- context \"\"\" def __init__(self, action=None, request=None, context=None,): self.action = action self.request =", "oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key, value) for", "not None: oprot.writeFieldBegin('headers', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers)) for kiter30, viter31 in self.headers.items():", "= iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.headers[_key14] = _val15 iprot.readMapEnd()", "fid == 2: if ftype == TType.I64: self.roleId = iprot.readI64() else: iprot.skip(ftype) elif", "1) oprot.writeString(self.level.encode('utf-8') if sys.version_info[0] == 2 else self.level) oprot.writeFieldEnd() if self.message is not", "iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.data = iprot.readString().decode('utf-8', errors='replace')", "self.thrift_spec])) return oprot.writeStructBegin('App') if self.id is not 
None: oprot.writeFieldBegin('id', TType.I64, 1) oprot.writeI64(self.id) oprot.writeFieldEnd()", "errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.parameters.append(_elem47) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype)", "not None: oprot.writeFieldBegin('id', TType.I64, 1) oprot.writeI64(self.id) oprot.writeFieldEnd() if self.roleId is not None: oprot.writeFieldBegin('roleId',", ") all_structs.append(Event) Event.thrift_spec = ( None, # 0 (1, TType.STRING, 'eventName', 'UTF8', None,", "oprot.writeI64(self.routeId) oprot.writeFieldEnd() if self.baseUrl is not None: oprot.writeFieldBegin('baseUrl', TType.STRING, 2) oprot.writeString(self.baseUrl.encode('utf-8') if sys.version_info[0]", "5 (6, TType.STRING, 'appKey', 'UTF8', None, ), # 6 (7, TType.LIST, 'scopes', (TType.STRING,", "iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.message =", "TType.STRING, 3) oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] == 2 else self.body) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def", "other): return not (self == other) class User(object): \"\"\" Attributes: - id -", "(self == other) class Log(object): \"\"\" Attributes: - level - message \"\"\" def", "all_structs = [] class Message(object): \"\"\" Attributes: - success - message \"\"\" def", "oprot.writeFieldBegin('parameters', TType.LIST, 8) oprot.writeListBegin(TType.STRING, len(self.parameters)) for iter49 in self.parameters: oprot.writeString(iter49.encode('utf-8') if sys.version_info[0] ==", "if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING:", ") all_structs.append(Result) Result.thrift_spec = ( None, # 0 (1, TType.STRUCT, 'response', [Response, None],", "None: oprot.writeFieldBegin('rpc', TType.STRUCT, 2) self.rpc.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self):", "oprot.writeFieldEnd() if 
self.body is not None: oprot.writeFieldBegin('body', TType.STRING, 5) oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] ==", "self.context.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is", "2 else iprot.readString() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.STRING:", ") all_structs.append(User) User.thrift_spec = ( None, # 0 (1, TType.I64, 'id', None, None,", "None: oprot.writeFieldBegin('message', TType.STRING, 2) oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message) oprot.writeFieldEnd() oprot.writeFieldStop()", "all_structs.append(Connection) Connection.thrift_spec = ( None, # 0 (1, TType.STRING, 'name', 'UTF8', None, ),", "False), None, ), # 2 (3, TType.STRING, 'body', 'UTF8', None, ), # 3", "TType.STRING, 1) oprot.writeString(self.action.encode('utf-8') if sys.version_info[0] == 2 else self.action) oprot.writeFieldEnd() if self.request is", "range(_size16): _key21 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val22 =", "errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.uriFragments[_key21] = _val22 iprot.readMapEnd() else: iprot.skip(ftype)", "fid == 2: if ftype == TType.I64: self.userId = iprot.readI64() else: iprot.skip(ftype) elif", "class App(object): \"\"\" Attributes: - id - userId - status - name -", "TApplicationException from thrift.protocol.TProtocol import TProtocolException from thrift.TRecursive import fix_spec import sys from thrift.transport", "elif fid == 3: if ftype == TType.STRING: self.body = iprot.readString().decode('utf-8', errors='replace') if", "(2, TType.I64, 'roleId', None, None, ), # 2 (3, TType.I64, 'categoryId', None, None,", "1 (2, TType.STRING, 'message', 'UTF8', None, ), # 2 ) fix_spec(all_structs) del all_structs", "self.scopes: oprot.writeString(iter48.encode('utf-8') 
if sys.version_info[0] == 2 else iter48) oprot.writeListEnd() oprot.writeFieldEnd() if self.parameters is", "(1, TType.STRUCT, 'response', [Response, None], None, ), # 1 (2, TType.LIST, 'events', (TType.STRUCT,", "(1, TType.STRING, 'name', 'UTF8', None, ), # 1 (2, TType.STRING, 'code', 'UTF8', None,", "2 else viter72) oprot.writeMapEnd() oprot.writeFieldEnd() if self.body is not None: oprot.writeFieldBegin('body', TType.STRING, 3)", "elif fid == 3: if ftype == TType.I32: self.status = iprot.readI32() else: iprot.skip(ftype)", "fid == 3: if ftype == TType.I64: self.categoryId = iprot.readI64() else: iprot.skip(ftype) elif", "== TType.STRING: self.baseUrl = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else:", "break if fid == 1: if ftype == TType.STRING: self.arguments = iprot.readString().decode('utf-8', errors='replace')", "errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 6:", "TType.STRING, 5) oprot.writeString(self.url.encode('utf-8') if sys.version_info[0] == 2 else self.url) oprot.writeFieldEnd() if self.appKey is", "iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: self.body = iprot.readString().decode('utf-8', errors='replace')", "not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Log') if", "iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.MAP: self.uriFragments =", "self.app is not None: oprot.writeFieldBegin('app', TType.STRUCT, 3) self.app.write(oprot) oprot.writeFieldEnd() if self.user is not", "scopes=None, parameters=None,): self.id = id self.userId = userId self.status = status self.name =", "== 2 else iprot.readString() self.headers[_key14] = _val15 iprot.readMapEnd() else: iprot.skip(ftype) elif fid ==", "for iter48 in self.scopes: oprot.writeString(iter48.encode('utf-8') if sys.version_info[0] == 
2 else iter48) oprot.writeListEnd() oprot.writeFieldEnd()", "1 ) all_structs.append(Context) Context.thrift_spec = ( None, # 0 (1, TType.I64, 'routeId', None,", "from thrift.transport import TTransport all_structs = [] class Message(object): \"\"\" Attributes: - success", "2) oprot.writeString(self.type.encode('utf-8') if sys.version_info[0] == 2 else self.type) oprot.writeFieldEnd() if self.config is not", "ftype == TType.MAP: self.headers = {} (_ktype65, _vtype66, _size64) = iprot.readMapBegin() for _i68", "not (self == other) class Result(object): \"\"\" Attributes: - response - events -", "self.thrift_spec])) return oprot.writeStructBegin('Connection') if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8') if", "= name self.email = email self.points = points def read(self, iprot): if iprot._fast_decode", "not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Context') if self.routeId is not None: oprot.writeFieldBegin('routeId',", "__ne__(self, other): return not (self == other) class User(object): \"\"\" Attributes: - id", "is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Execute')", "self.http.write(oprot) oprot.writeFieldEnd() if self.rpc is not None: oprot.writeFieldBegin('rpc', TType.STRUCT, 2) self.rpc.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop()", "self.response is not None: oprot.writeFieldBegin('response', TType.STRUCT, 1) self.response.write(oprot) oprot.writeFieldEnd() if self.events is not", "not None: oprot.writeFieldBegin('context', TType.STRUCT, 3) self.context.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def", "self.status is not None: oprot.writeFieldBegin('status', TType.I32, 3) oprot.writeI32(self.status) oprot.writeFieldEnd() if 
self.name is not", "sys.version_info[0] == 2 else iprot.readString() self.parameters[_key28] = _val29 iprot.readMapEnd() else: iprot.skip(ftype) elif fid", "return oprot.writeStructBegin('Response') if self.statusCode is not None: oprot.writeFieldBegin('statusCode', TType.I32, 1) oprot.writeI32(self.statusCode) oprot.writeFieldEnd() if", "else iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRUCT: self.app", "if self.http is not None: oprot.writeFieldBegin('http', TType.STRUCT, 1) self.http.write(oprot) oprot.writeFieldEnd() if self.rpc is", "TType.I64: self.userId = iprot.readI64() else: iprot.skip(ftype) elif fid == 3: if ftype ==", "if sys.version_info[0] == 2 else self.arguments) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def", "not None: oprot.writeFieldBegin('status', TType.I32, 3) oprot.writeI32(self.status) oprot.writeFieldEnd() if self.name is not None: oprot.writeFieldBegin('name',", "if ftype == TType.STRING: self.eventName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", ") all_structs.append(RpcRequest) RpcRequest.thrift_spec = ( None, # 0 (1, TType.STRING, 'arguments', 'UTF8', None,", "uriFragments=None, parameters=None, body=None,): self.method = method self.headers = headers self.uriFragments = uriFragments self.parameters", "routeId - baseUrl - app - user \"\"\" def __init__(self, routeId=None, baseUrl=None, app=None,", "Attributes: - http - rpc \"\"\" def __init__(self, http=None, rpc=None,): self.http = http", "'user', [User, None], None, ), # 4 ) all_structs.append(App) App.thrift_spec = ( None,", "= iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val22 = iprot.readString().decode('utf-8', errors='replace')", "TType.LIST, 8) oprot.writeListBegin(TType.STRING, len(self.parameters)) for iter49 in self.parameters: oprot.writeString(iter49.encode('utf-8') if 
sys.version_info[0] == 2", "fid == 6: if ftype == TType.STRING: self.appKey = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "__init__(self, action=None, request=None, context=None,): self.action = action self.request = request self.context = context", "_elem41 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.scopes.append(_elem41) iprot.readListEnd() else:", "self.thrift_spec])) return oprot.writeStructBegin('Log') if self.level is not None: oprot.writeFieldBegin('level', TType.STRING, 1) oprot.writeString(self.level.encode('utf-8') if", "'rpc', [RpcRequest, None], None, ), # 2 ) all_structs.append(HttpRequest) HttpRequest.thrift_spec = ( None,", "Execute.thrift_spec = ( None, # 0 (1, TType.STRING, 'action', 'UTF8', None, ), #", "5 (6, TType.STRING, 'email', 'UTF8', None, ), # 6 (7, TType.I32, 'points', None,", "not None: oprot.writeFieldBegin('url', TType.STRING, 5) oprot.writeString(self.url.encode('utf-8') if sys.version_info[0] == 2 else self.url) oprot.writeFieldEnd()", "else self.data) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r'", "== 1: if ftype == TType.STRUCT: self.response = Response() self.response.read(iprot) else: iprot.skip(ftype) elif", "self.action) oprot.writeFieldEnd() if self.request is not None: oprot.writeFieldBegin('request', TType.STRUCT, 2) self.request.write(oprot) oprot.writeFieldEnd() if", "'name', 'UTF8', None, ), # 1 (2, TType.STRING, 'type', 'UTF8', None, ), #", "# 4 (5, TType.STRING, 'body', 'UTF8', None, ), # 5 ) all_structs.append(RpcRequest) RpcRequest.thrift_spec", "ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.action", "if ftype == TType.STRUCT: self.http = HttpRequest() self.http.read(iprot) else: iprot.skip(ftype) elif fid ==", "== TType.STOP: break if fid == 1: if ftype == TType.STRING: self.action =", "in self.logs: 
iter63.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L", "'appKey', 'UTF8', None, ), # 6 (7, TType.LIST, 'scopes', (TType.STRING, 'UTF8', False), None,", "sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.email is not None: oprot.writeFieldBegin('email', TType.STRING,", "not None: oprot.writeFieldBegin('id', TType.I64, 1) oprot.writeI64(self.id) oprot.writeFieldEnd() if self.userId is not None: oprot.writeFieldBegin('userId',", "isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self ==", "= headers self.uriFragments = uriFragments self.parameters = parameters self.body = body def read(self,", "), # 3 ) all_structs.append(Event) Event.thrift_spec = ( None, # 0 (1, TType.STRING,", "- rpc \"\"\" def __init__(self, http=None, rpc=None,): self.http = http self.rpc = rpc", "'UTF8', None, ), # 3 ) all_structs.append(Event) Event.thrift_spec = ( None, # 0", "sys.version_info[0] == 2 else self.type) oprot.writeFieldEnd() if self.config is not None: oprot.writeFieldBegin('config', TType.MAP,", "else: iprot.skip(ftype) elif fid == 3: if ftype == TType.MAP: self.uriFragments = {}", "iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val70 = iprot.readString().decode('utf-8', errors='replace') if", "for iter63 in self.logs: iter63.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def", "(2, TType.STRUCT, 'rpc', [RpcRequest, None], None, ), # 2 ) all_structs.append(HttpRequest) HttpRequest.thrift_spec =", "== 2 else kiter32) oprot.writeString(viter33.encode('utf-8') if sys.version_info[0] == 2 else viter33) oprot.writeMapEnd() oprot.writeFieldEnd()", "oprot.writeFieldEnd() if self.uriFragments is not None: oprot.writeFieldBegin('uriFragments', TType.MAP, 3) 
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.uriFragments)) for", "TType.STRING, len(self.headers)) for kiter71, viter72 in self.headers.items(): oprot.writeString(kiter71.encode('utf-8') if sys.version_info[0] == 2 else", "is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Log') if self.level is not None:", "action=None, request=None, context=None,): self.action = action self.request = request self.context = context def", "not None: oprot.writeFieldBegin('appKey', TType.STRING, 6) oprot.writeString(self.appKey.encode('utf-8') if sys.version_info[0] == 2 else self.appKey) oprot.writeFieldEnd()", "userId - status - name - url - appKey - scopes - parameters", "def __init__(self, response=None, events=None, logs=None,): self.response = response self.events = events self.logs =", "self.name) oprot.writeFieldEnd() if self.email is not None: oprot.writeFieldBegin('email', TType.STRING, 6) oprot.writeString(self.email.encode('utf-8') if sys.version_info[0]", "None, ), # 3 ) all_structs.append(Action) Action.thrift_spec = ( None, # 0 (1,", "kiter34, viter35 in self.parameters.items(): oprot.writeString(kiter34.encode('utf-8') if sys.version_info[0] == 2 else kiter34) oprot.writeString(viter35.encode('utf-8') if", "TType.LIST, 7) oprot.writeListBegin(TType.STRING, len(self.scopes)) for iter48 in self.scopes: oprot.writeString(iter48.encode('utf-8') if sys.version_info[0] == 2", "2 else iprot.readString() else: iprot.skip(ftype) elif fid == 7: if ftype == TType.I32:", "_i4 in range(_size0): _key5 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "1: if ftype == TType.STRUCT: self.http = HttpRequest() self.http.read(iprot) else: iprot.skip(ftype) elif fid", "else iprot.readString() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode", "_val6 = 
iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.config[_key5] = _val6", "return oprot.writeStructBegin('Request') if self.http is not None: oprot.writeFieldBegin('http', TType.STRUCT, 1) self.http.write(oprot) oprot.writeFieldEnd() if", "None], None, ), # 2 (3, TType.STRUCT, 'context', [Context, None], None, ), #", "is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Response')", "not None: oprot.writeFieldBegin('code', TType.STRING, 2) oprot.writeString(self.code.encode('utf-8') if sys.version_info[0] == 2 else self.code) oprot.writeFieldEnd()", "return oprot.writeStructBegin('User') if self.id is not None: oprot.writeFieldBegin('id', TType.I64, 1) oprot.writeI64(self.id) oprot.writeFieldEnd() if", "not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Result') if", "iprot.readMapEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is", "== TType.STOP: break if fid == 1: if ftype == TType.STRING: self.eventName =", "fid == 1: if ftype == TType.STRING: self.eventName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.request = Request()", "== other) class Execute(object): \"\"\" Attributes: - action - request - context \"\"\"", "= body def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport)", "elif fid == 2: if ftype == TType.STRING: self.message = iprot.readString().decode('utf-8', errors='replace') if", "in self.scopes: oprot.writeString(iter48.encode('utf-8') if sys.version_info[0] == 2 else iter48) oprot.writeListEnd() oprot.writeFieldEnd() if 
self.parameters", "= ( None, # 0 (1, TType.STRING, 'level', 'UTF8', None, ), # 1", "App.thrift_spec = ( None, # 0 (1, TType.I64, 'id', None, None, ), #", "if ftype == TType.STOP: break if fid == 1: if ftype == TType.I64:", "other): return not (self == other) class Execute(object): \"\"\" Attributes: - action -", "if sys.version_info[0] == 2 else iprot.readString() self.scopes.append(_elem41) iprot.readListEnd() else: iprot.skip(ftype) elif fid ==", "(2, TType.STRING, 'message', 'UTF8', None, ), # 2 ) all_structs.append(Connection) Connection.thrift_spec = (", "TMessageType, TFrozenDict, TException, TApplicationException from thrift.protocol.TProtocol import TProtocolException from thrift.TRecursive import fix_spec import", "oprot.writeStructBegin('Context') if self.routeId is not None: oprot.writeFieldBegin('routeId', TType.I64, 1) oprot.writeI64(self.routeId) oprot.writeFieldEnd() if self.baseUrl", "self.id = id self.userId = userId self.status = status self.name = name self.url", "iter48 in self.scopes: oprot.writeString(iter48.encode('utf-8') if sys.version_info[0] == 2 else iter48) oprot.writeListEnd() oprot.writeFieldEnd() if", "== 5: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Result') if self.response is not None: oprot.writeFieldBegin('response', TType.STRUCT,", "oprot.writeFieldBegin('uriFragments', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.uriFragments)) for kiter32, viter33 in self.uriFragments.items(): oprot.writeString(kiter32.encode('utf-8') if", "__ne__(self, other): return not (self == other) all_structs.append(Message) Message.thrift_spec = ( None, #", "TType.LIST, 'parameters', (TType.STRING, 'UTF8', False), None, ), # 8 ) all_structs.append(User) User.thrift_spec =", "while True: (fname, ftype, fid) = iprot.readFieldBegin() if 
ftype == TType.STOP: break if", "2 else iprot.readString() _val29 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "\"\"\" Attributes: - name - type - config \"\"\" def __init__(self, name=None, type=None,", "name - email - points \"\"\" def __init__(self, id=None, roleId=None, categoryId=None, status=None, name=None,", "), # 4 (5, TType.STRING, 'url', 'UTF8', None, ), # 5 (6, TType.STRING,", "string: py # from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException from thrift.protocol.TProtocol", "scopes - parameters \"\"\" def __init__(self, id=None, userId=None, status=None, name=None, url=None, appKey=None, scopes=None,", "else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8',", "name self.type = type self.config = config def read(self, iprot): if iprot._fast_decode is", "- roleId - categoryId - status - name - email - points \"\"\"", "= {} (_ktype10, _vtype11, _size9) = iprot.readMapBegin() for _i13 in range(_size9): _key14 =", "else iprot.readString() self.headers[_key69] = _val70 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 3: if", "(fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid ==", "return not (self == other) class Context(object): \"\"\" Attributes: - routeId - baseUrl", "return oprot.writeStructBegin('App') if self.id is not None: oprot.writeFieldBegin('id', TType.I64, 1) oprot.writeI64(self.id) oprot.writeFieldEnd() if", "self.success is not None: oprot.writeFieldBegin('success', TType.BOOL, 1) oprot.writeBool(self.success) oprot.writeFieldEnd() if self.message is not", "not None: oprot.writeFieldBegin('action', TType.STRING, 1) oprot.writeString(self.action.encode('utf-8') if sys.version_info[0] == 2 else self.action) oprot.writeFieldEnd()", "1 (2, TType.STRING, 'message', 'UTF8', None, ), # 2 ) all_structs.append(Connection) Connection.thrift_spec =", 
"url=None, appKey=None, scopes=None, parameters=None,): self.id = id self.userId = userId self.status = status", "= iprot.readI64() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.I64: self.roleId", "iprot.skip(ftype) elif fid == 7: if ftype == TType.I32: self.points = iprot.readI32() else:", "'UTF8', None, ), # 2 (3, TType.MAP, 'config', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False),", "2: if ftype == TType.STRING: self.code = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "is not None: oprot.writeFieldBegin('action', TType.STRING, 1) oprot.writeString(self.action.encode('utf-8') if sys.version_info[0] == 2 else self.action)", "__init__(self, statusCode=None, headers=None, body=None,): self.statusCode = statusCode self.headers = headers self.body = body", "iprot.readI32() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRING: self.name =", "TType.I32: self.statusCode = iprot.readI32() else: iprot.skip(ftype) elif fid == 2: if ftype ==", "\"\"\" def __init__(self, action=None, request=None, context=None,): self.action = action self.request = request self.context", "== 2: if ftype == TType.I64: self.roleId = iprot.readI64() else: iprot.skip(ftype) elif fid", "not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('RpcRequest') if", "(3, TType.I64, 'categoryId', None, None, ), # 3 (4, TType.I32, 'status', None, None,", "kiter71) oprot.writeString(viter72.encode('utf-8') if sys.version_info[0] == 2 else viter72) oprot.writeMapEnd() oprot.writeFieldEnd() if self.body is", "rpc def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and", "other.__dict__ def __ne__(self, other): return not (self == other) class Event(object): \"\"\" Attributes:", "Attributes: - name - code \"\"\" def __init__(self, name=None, code=None,): self.name = name", 
"other.__dict__ def __ne__(self, other): return not (self == other) class Execute(object): \"\"\" Attributes:", "2 else iprot.readString() self.scopes.append(_elem41) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 8: if ftype", "[self.__class__, self.thrift_spec])) return oprot.writeStructBegin('User') if self.id is not None: oprot.writeFieldBegin('id', TType.I64, 1) oprot.writeI64(self.id)", "1: if ftype == TType.STRING: self.arguments = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "in self.parameters: oprot.writeString(iter49.encode('utf-8') if sys.version_info[0] == 2 else iter49) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd()", "- data \"\"\" def __init__(self, eventName=None, data=None,): self.eventName = eventName self.data = data", "TType.STOP: break if fid == 1: if ftype == TType.STRING: self.eventName = iprot.readString().decode('utf-8',", "- status - name - email - points \"\"\" def __init__(self, id=None, roleId=None,", "== 2 else self.url) oprot.writeFieldEnd() if self.appKey is not None: oprot.writeFieldBegin('appKey', TType.STRING, 6)", "oprot.writeString(viter72.encode('utf-8') if sys.version_info[0] == 2 else viter72) oprot.writeMapEnd() oprot.writeFieldEnd() if self.body is not", "is not None: oprot.writeFieldBegin('eventName', TType.STRING, 1) oprot.writeString(self.eventName.encode('utf-8') if sys.version_info[0] == 2 else self.eventName)", "(TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 2 (3, TType.STRING, 'body', 'UTF8',", "TType.I64: self.routeId = iprot.readI64() else: iprot.skip(ftype) elif fid == 2: if ftype ==", "is not None: oprot.writeFieldBegin('status', TType.I32, 4) oprot.writeI32(self.status) oprot.writeFieldEnd() if self.name is not None:", "is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('RpcRequest') if self.arguments is not None:", "logs def read(self, 
iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and", "TProtocolException from thrift.TRecursive import fix_spec import sys from thrift.transport import TTransport all_structs =", "# 0 (1, TType.STRING, 'method', 'UTF8', None, ), # 1 (2, TType.MAP, 'headers',", "{} (_ktype10, _vtype11, _size9) = iprot.readMapBegin() for _i13 in range(_size9): _key14 = iprot.readString().decode('utf-8',", "write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self,", "None, # 0 (1, TType.I64, 'id', None, None, ), # 1 (2, TType.I64,", "4: if ftype == TType.MAP: self.parameters = {} (_ktype24, _vtype25, _size23) = iprot.readMapBegin()", "= scopes self.parameters = parameters def read(self, iprot): if iprot._fast_decode is not None", "_i68 in range(_size64): _key69 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "__init__(self, name=None, code=None,): self.name = name self.code = code def read(self, iprot): if", "4 (5, TType.STRING, 'body', 'UTF8', None, ), # 5 ) all_structs.append(RpcRequest) RpcRequest.thrift_spec =", "'UTF8', None, ), # 1 ) all_structs.append(Context) Context.thrift_spec = ( None, # 0", "TTransport all_structs = [] class Message(object): \"\"\" Attributes: - success - message \"\"\"", "iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break", "elif fid == 3: if ftype == TType.MAP: self.config = {} (_ktype1, _vtype2,", "__eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return", "= iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val6 = iprot.readString().decode('utf-8', errors='replace')", "status self.name = name self.url = url self.appKey = appKey self.scopes = scopes", "- id - userId - status - name - 
url - appKey -", "def __ne__(self, other): return not (self == other) class Connection(object): \"\"\" Attributes: -", "= iprot.readMapBegin() for _i27 in range(_size23): _key28 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "== other) class HttpRequest(object): \"\"\" Attributes: - method - headers - uriFragments -", "not None: oprot.writeFieldBegin('eventName', TType.STRING, 1) oprot.writeString(self.eventName.encode('utf-8') if sys.version_info[0] == 2 else self.eventName) oprot.writeFieldEnd()", "'UTF8', None, ), # 2 (3, TType.STRUCT, 'app', [App, None], None, ), #", "fid == 1: if ftype == TType.STRING: self.level = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('User') if self.id is not None: oprot.writeFieldBegin('id', TType.I64,", "== 2 else self.arguments) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L", "range(_size0): _key5 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val6 =", "{} (_ktype24, _vtype25, _size23) = iprot.readMapBegin() for _i27 in range(_size23): _key28 = iprot.readString().decode('utf-8',", "'roleId', None, None, ), # 2 (3, TType.I64, 'categoryId', None, None, ), #", "if self.level is not None: oprot.writeFieldBegin('level', TType.STRING, 1) oprot.writeString(self.level.encode('utf-8') if sys.version_info[0] == 2", "_i54 in range(_size50): _elem55 = Event() _elem55.read(iprot) self.events.append(_elem55) iprot.readListEnd() else: iprot.skip(ftype) elif fid", "= ( None, # 0 (1, TType.I64, 'id', None, None, ), # 1", "TType.STOP: break if fid == 1: if ftype == TType.I64: self.id = iprot.readI64()", "oprot.writeFieldEnd() if self.body is not None: oprot.writeFieldBegin('body', TType.STRING, 3) 
oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] ==", "iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.MAP: self.parameters =", "points def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and", "self.action = action self.request = request self.context = context def read(self, iprot): if", "oprot.writeString(viter35.encode('utf-8') if sys.version_info[0] == 2 else viter35) oprot.writeMapEnd() oprot.writeFieldEnd() if self.body is not", "== 6: if ftype == TType.STRING: self.appKey = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Connection') if self.name is not None:", "== 1: if ftype == TType.STRING: self.level = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val22 = iprot.readString().decode('utf-8', errors='replace') if", "iprot.readString() self.parameters.append(_elem47) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if", "2 else viter31) oprot.writeMapEnd() oprot.writeFieldEnd() if self.uriFragments is not None: oprot.writeFieldBegin('uriFragments', TType.MAP, 3)", "self.code = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) else:", "(3, TType.I32, 'status', None, None, ), # 3 (4, TType.STRING, 'name', 'UTF8', None,", "None: oprot.writeFieldBegin('userId', TType.I64, 2) oprot.writeI64(self.userId) oprot.writeFieldEnd() if self.status is not None: oprot.writeFieldBegin('status', TType.I32,", "iprot.readString() self.parameters[_key28] = _val29 iprot.readMapEnd() else: iprot.skip(ftype) elif fid 
== 5: if ftype", "oprot.writeFieldBegin('config', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.config)) for kiter7, viter8 in self.config.items(): oprot.writeString(kiter7.encode('utf-8') if", "config def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and", "oprot.writeFieldEnd() if self.userId is not None: oprot.writeFieldBegin('userId', TType.I64, 2) oprot.writeI64(self.userId) oprot.writeFieldEnd() if self.status", "range(_size23): _key28 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val29 =", "_elem55 = Event() _elem55.read(iprot) self.events.append(_elem55) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if", "oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' % (key,", "= iprot.readI32() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.MAP: self.headers", "all_structs.append(Action) Action.thrift_spec = ( None, # 0 (1, TType.STRING, 'name', 'UTF8', None, ),", "_elem61.read(iprot) self.logs.append(_elem61) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if", "(self == other) class Request(object): \"\"\" Attributes: - http - rpc \"\"\" def", "arguments \"\"\" def __init__(self, arguments=None,): self.arguments = arguments def read(self, iprot): if iprot._fast_decode", "== other) class Action(object): \"\"\" Attributes: - name - code \"\"\" def __init__(self,", "errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.config[_key5] = _val6 iprot.readMapEnd() else: iprot.skip(ftype)", "oprot.writeFieldEnd() if self.code is not None: oprot.writeFieldBegin('code', TType.STRING, 2) oprot.writeString(self.code.encode('utf-8') if sys.version_info[0] ==", "= arguments def read(self, iprot): if iprot._fast_decode is not 
None and isinstance(iprot.trans, TTransport.CReadableTransport)", "class User(object): \"\"\" Attributes: - id - roleId - categoryId - status -", "( None, # 0 (1, TType.STRING, 'eventName', 'UTF8', None, ), # 1 (2,", "if self.eventName is not None: oprot.writeFieldBegin('eventName', TType.STRING, 1) oprot.writeString(self.eventName.encode('utf-8') if sys.version_info[0] == 2", "import TTransport all_structs = [] class Message(object): \"\"\" Attributes: - success - message", "6) oprot.writeString(self.appKey.encode('utf-8') if sys.version_info[0] == 2 else self.appKey) oprot.writeFieldEnd() if self.scopes is not", "TType.STRING, len(self.config)) for kiter7, viter8 in self.config.items(): oprot.writeString(kiter7.encode('utf-8') if sys.version_info[0] == 2 else", "oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 4) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) for", "iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.type =", "self.headers is not None: oprot.writeFieldBegin('headers', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers)) for kiter30, viter31", "categoryId=None, status=None, name=None, email=None, points=None,): self.id = id self.roleId = roleId self.categoryId =", "not None: oprot.writeFieldBegin('http', TType.STRUCT, 1) self.http.write(oprot) oprot.writeFieldEnd() if self.rpc is not None: oprot.writeFieldBegin('rpc',", "iprot.readListBegin() for _i46 in range(_size42): _elem47 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "kiter71, viter72 in self.headers.items(): oprot.writeString(kiter71.encode('utf-8') if sys.version_info[0] == 2 else kiter71) oprot.writeString(viter72.encode('utf-8') if", "def __ne__(self, other): return not (self == other) all_structs.append(Message) Message.thrift_spec = ( None,", "5 ) all_structs.append(RpcRequest) RpcRequest.thrift_spec = ( None, 
# 0 (1, TType.STRING, 'arguments', 'UTF8',", "iprot.readString() _val29 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.parameters[_key28] =", "- headers - body \"\"\" def __init__(self, statusCode=None, headers=None, body=None,): self.statusCode = statusCode", "ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.name", "TType.I32, 1) oprot.writeI32(self.statusCode) oprot.writeFieldEnd() if self.headers is not None: oprot.writeFieldBegin('headers', TType.MAP, 2) oprot.writeMapBegin(TType.STRING,", "if self.email is not None: oprot.writeFieldBegin('email', TType.STRING, 6) oprot.writeString(self.email.encode('utf-8') if sys.version_info[0] == 2", "TType.STRUCT, 'context', [Context, None], None, ), # 3 ) all_structs.append(Request) Request.thrift_spec = (", "None, ), # 1 (2, TType.LIST, 'events', (TType.STRUCT, [Event, None], False), None, ),", "== 2 else self.type) oprot.writeFieldEnd() if self.config is not None: oprot.writeFieldBegin('config', TType.MAP, 3)", "2 else kiter32) oprot.writeString(viter33.encode('utf-8') if sys.version_info[0] == 2 else viter33) oprot.writeMapEnd() oprot.writeFieldEnd() if", "), # 3 (4, TType.MAP, 'parameters', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ),", "user \"\"\" def __init__(self, routeId=None, baseUrl=None, app=None, user=None,): self.routeId = routeId self.baseUrl =", "None, ), # 2 (3, TType.LIST, 'logs', (TType.STRUCT, [Log, None], False), None, ),", "None, ), # 4 (5, TType.STRING, 'body', 'UTF8', None, ), # 5 )", "# 0 (1, TType.STRUCT, 'http', [HttpRequest, None], None, ), # 1 (2, TType.STRUCT,", "_i20 in range(_size16): _key21 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "3 ) all_structs.append(Event) Event.thrift_spec = ( None, # 0 (1, TType.STRING, 'eventName', 'UTF8',", "ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1:", 
"self.events = events self.logs = logs def read(self, iprot): if iprot._fast_decode is not", "(TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3 ) all_structs.append(Action) Action.thrift_spec =", "thrift.transport import TTransport all_structs = [] class Message(object): \"\"\" Attributes: - success -", "oprot.writeStructBegin('Result') if self.response is not None: oprot.writeFieldBegin('response', TType.STRUCT, 1) self.response.write(oprot) oprot.writeFieldEnd() if self.events", "TType.STRING, 'level', 'UTF8', None, ), # 1 (2, TType.STRING, 'message', 'UTF8', None, ),", "not None: oprot.writeFieldBegin('status', TType.I32, 4) oprot.writeI32(self.status) oprot.writeFieldEnd() if self.name is not None: oprot.writeFieldBegin('name',", "all_structs.append(Result) Result.thrift_spec = ( None, # 0 (1, TType.STRUCT, 'response', [Response, None], None,", "TType.STRING: self.action = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype)", "not (self == other) class Response(object): \"\"\" Attributes: - statusCode - headers -", "'email', 'UTF8', None, ), # 6 (7, TType.I32, 'points', None, None, ), #", "else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is not", "= [] (_etype53, _size50) = iprot.readListBegin() for _i54 in range(_size50): _elem55 = Event()", "http self.rpc = rpc def read(self, iprot): if iprot._fast_decode is not None and", "False), None, ), # 3 ) all_structs.append(Action) Action.thrift_spec = ( None, # 0", "), # 2 (3, TType.LIST, 'logs', (TType.STRUCT, [Log, None], False), None, ), #", "sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype", "oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) for kiter34, viter35 in self.parameters.items(): oprot.writeString(kiter34.encode('utf-8') if sys.version_info[0] == 2", "6 (7, 
TType.I32, 'points', None, None, ), # 7 ) all_structs.append(Result) Result.thrift_spec =", "self.points = points def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans,", "is not None: oprot.writeFieldBegin('code', TType.STRING, 2) oprot.writeString(self.code.encode('utf-8') if sys.version_info[0] == 2 else self.code)", "\"\"\" def __init__(self, response=None, events=None, logs=None,): self.response = response self.events = events self.logs", "if ftype == TType.I32: self.status = iprot.readI32() else: iprot.skip(ftype) elif fid == 4:", "kiter30) oprot.writeString(viter31.encode('utf-8') if sys.version_info[0] == 2 else viter31) oprot.writeMapEnd() oprot.writeFieldEnd() if self.uriFragments is", "== 2 else self.level) oprot.writeFieldEnd() if self.message is not None: oprot.writeFieldBegin('message', TType.STRING, 2)", "# 1 (2, TType.STRUCT, 'request', [Request, None], None, ), # 2 (3, TType.STRUCT,", "(1, TType.STRUCT, 'http', [HttpRequest, None], None, ), # 1 (2, TType.STRUCT, 'rpc', [RpcRequest,", "3 ) all_structs.append(Response) Response.thrift_spec = ( None, # 0 (1, TType.I32, 'statusCode', None,", "elif fid == 7: if ftype == TType.I32: self.points = iprot.readI32() else: iprot.skip(ftype)", "None, None, ), # 1 (2, TType.MAP, 'headers', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False),", "= id self.userId = userId self.status = status self.name = name self.url =", "iprot.skip(ftype) elif fid == 6: if ftype == TType.STRING: self.email = iprot.readString().decode('utf-8', errors='replace')", "TType.STRING, 2) oprot.writeString(self.code.encode('utf-8') if sys.version_info[0] == 2 else self.code) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def", "= iprot.readI64() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.I64: self.categoryId", "0 (1, TType.I64, 'id', None, None, ), # 1 (2, TType.I64, 'userId', None,", "ftype == TType.I64: self.userId = iprot.readI64() else: iprot.skip(ftype) elif fid == 3: 
if", "'UTF8', False), None, ), # 7 (8, TType.LIST, 'parameters', (TType.STRING, 'UTF8', False), None,", "# 3 ) all_structs.append(Response) Response.thrift_spec = ( None, # 0 (1, TType.I32, 'statusCode',", "and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class", "# 2 ) all_structs.append(HttpRequest) HttpRequest.thrift_spec = ( None, # 0 (1, TType.STRING, 'method',", "self.config is not None: oprot.writeFieldBegin('config', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.config)) for kiter7, viter8", "if sys.version_info[0] == 2 else iter48) oprot.writeListEnd() oprot.writeFieldEnd() if self.parameters is not None:", "name self.email = email self.points = points def read(self, iprot): if iprot._fast_decode is", "sys.version_info[0] == 2 else iprot.readString() self.parameters.append(_elem47) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd()", "self.rpc is not None: oprot.writeFieldBegin('rpc', TType.STRUCT, 2) self.rpc.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self):", "(self == other) class RpcRequest(object): \"\"\" Attributes: - arguments \"\"\" def __init__(self, arguments=None,):", "TType.I32: self.status = iprot.readI32() else: iprot.skip(ftype) elif fid == 5: if ftype ==", "oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Log') if self.level is not None: oprot.writeFieldBegin('level', TType.STRING, 1)", "3) oprot.writeI32(self.status) oprot.writeFieldEnd() if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 4) oprot.writeString(self.name.encode('utf-8') if", "ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.method", "== TType.LIST: self.parameters = [] (_etype45, _size42) = iprot.readListBegin() for _i46 in range(_size42):", "iprot.skip(ftype) elif fid == 4: if ftype == TType.MAP: 
self.parameters = {} (_ktype24,", "Connection(object): \"\"\" Attributes: - name - type - config \"\"\" def __init__(self, name=None,", "and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('RpcRequest') if self.arguments is", "= status self.name = name self.url = url self.appKey = appKey self.scopes =", "TType.STRUCT, 1) self.response.write(oprot) oprot.writeFieldEnd() if self.events is not None: oprot.writeFieldBegin('events', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT,", "is not None: oprot.writeFieldBegin('parameters', TType.LIST, 8) oprot.writeListBegin(TType.STRING, len(self.parameters)) for iter49 in self.parameters: oprot.writeString(iter49.encode('utf-8')", "'body', 'UTF8', None, ), # 5 ) all_structs.append(RpcRequest) RpcRequest.thrift_spec = ( None, #", "= _val6 iprot.readMapEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if", "def __ne__(self, other): return not (self == other) class Context(object): \"\"\" Attributes: -", "is not None: oprot.writeFieldBegin('scopes', TType.LIST, 7) oprot.writeListBegin(TType.STRING, len(self.scopes)) for iter48 in self.scopes: oprot.writeString(iter48.encode('utf-8')", "1) oprot.writeI64(self.routeId) oprot.writeFieldEnd() if self.baseUrl is not None: oprot.writeFieldBegin('baseUrl', TType.STRING, 2) oprot.writeString(self.baseUrl.encode('utf-8') if", "self.thrift_spec])) return oprot.writeStructBegin('Context') if self.routeId is not None: oprot.writeFieldBegin('routeId', TType.I64, 1) oprot.writeI64(self.routeId) oprot.writeFieldEnd()", "TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.uriFragments)) for kiter32, viter33 in self.uriFragments.items(): oprot.writeString(kiter32.encode('utf-8') if sys.version_info[0]", "== 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype ==", "name=None, url=None, 
appKey=None, scopes=None, parameters=None,): self.id = id self.userId = userId self.status =", "# 0 (1, TType.I64, 'id', None, None, ), # 1 (2, TType.I64, 'userId',", "is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Execute') if self.action is not None:", "body=None,): self.method = method self.headers = headers self.uriFragments = uriFragments self.parameters = parameters", "if sys.version_info[0] == 2 else viter8) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return", "id self.userId = userId self.status = status self.name = name self.url = url", "None, # 0 (1, TType.STRUCT, 'http', [HttpRequest, None], None, ), # 1 (2,", "== 3: if ftype == TType.LIST: self.logs = [] (_etype59, _size56) = iprot.readListBegin()", "(1, TType.STRING, 'action', 'UTF8', None, ), # 1 (2, TType.STRUCT, 'request', [Request, None],", "Attributes: - id - roleId - categoryId - status - name - email", "'action', 'UTF8', None, ), # 1 (2, TType.STRUCT, 'request', [Request, None], None, ),", "errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd()", "iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.body = iprot.readString().decode('utf-8', errors='replace')", "= iprot.readListBegin() for _i60 in range(_size56): _elem61 = Log() _elem61.read(iprot) self.logs.append(_elem61) iprot.readListEnd() else:", "class RpcRequest(object): \"\"\" Attributes: - arguments \"\"\" def __init__(self, arguments=None,): self.arguments = arguments", "2 else viter33) oprot.writeMapEnd() oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 4)", "is not None: oprot.writeFieldBegin('app', TType.STRUCT, 3) self.app.write(oprot) oprot.writeFieldEnd() if self.user is not None:", "errors='replace') if sys.version_info[0] == 
2 else iprot.readString() _val29 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "if sys.version_info[0] == 2 else iprot.readString() self.uriFragments[_key21] = _val22 iprot.readMapEnd() else: iprot.skip(ftype) elif", "if self.status is not None: oprot.writeFieldBegin('status', TType.I32, 3) oprot.writeI32(self.status) oprot.writeFieldEnd() if self.name is", "self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else", "TType.I64, 'roleId', None, None, ), # 2 (3, TType.I64, 'categoryId', None, None, ),", "self.context.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' %", "TType.I64, 1) oprot.writeI64(self.id) oprot.writeFieldEnd() if self.userId is not None: oprot.writeFieldBegin('userId', TType.I64, 2) oprot.writeI64(self.userId)", "None, ), # 1 (2, TType.STRUCT, 'rpc', [RpcRequest, None], None, ), # 2", "url - appKey - scopes - parameters \"\"\" def __init__(self, id=None, userId=None, status=None,", "oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('HttpRequest') if self.method is not None: oprot.writeFieldBegin('method', TType.STRING, 1)", "oprot.writeFieldBegin('roleId', TType.I64, 2) oprot.writeI64(self.roleId) oprot.writeFieldEnd() if self.categoryId is not None: oprot.writeFieldBegin('categoryId', TType.I64, 3)", "None, ), # 3 ) all_structs.append(Response) Response.thrift_spec = ( None, # 0 (1,", "self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('HttpRequest') if self.method is not", "iprot.skip(ftype) elif fid == 2: if ftype == TType.MAP: self.headers = {} (_ktype65,", "(2, TType.LIST, 'events', (TType.STRUCT, [Event, None], False), None, ), # 2 (3, TType.LIST,", "iprot.readString().decode('utf-8', 
errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.parameters[_key28] = _val29 iprot.readMapEnd() else:", "not None: oprot.writeFieldBegin('baseUrl', TType.STRING, 2) oprot.writeString(self.baseUrl.encode('utf-8') if sys.version_info[0] == 2 else self.baseUrl) oprot.writeFieldEnd()", "is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Message') if self.success is not None:", "if ftype == TType.STRUCT: self.user = User() self.user.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd()", "== TType.STRUCT: self.response = Response() self.response.read(iprot) else: iprot.skip(ftype) elif fid == 2: if", "== TType.STRUCT: self.http = HttpRequest() self.http.read(iprot) else: iprot.skip(ftype) elif fid == 2: if", "None: oprot.writeFieldBegin('http', TType.STRUCT, 1) self.http.write(oprot) oprot.writeFieldEnd() if self.rpc is not None: oprot.writeFieldBegin('rpc', TType.STRUCT,", "_size0) = iprot.readMapBegin() for _i4 in range(_size0): _key5 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "= level self.message = message def read(self, iprot): if iprot._fast_decode is not None", "TType.I64, 'id', None, None, ), # 1 (2, TType.I64, 'userId', None, None, ),", "ftype == TType.STRING: self.baseUrl = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "# 3 (4, TType.STRING, 'name', 'UTF8', None, ), # 4 (5, TType.STRING, 'url',", "points=None,): self.id = id self.roleId = roleId self.categoryId = categoryId self.status = status", "\"\"\" def __init__(self, name=None, code=None,): self.name = name self.code = code def read(self,", "method - headers - uriFragments - parameters - body \"\"\" def __init__(self, method=None,", "is not None: oprot.writeFieldBegin('baseUrl', TType.STRING, 2) oprot.writeString(self.baseUrl.encode('utf-8') if sys.version_info[0] == 2 else 
self.baseUrl)", "elif fid == 3: if ftype == TType.MAP: self.uriFragments = {} (_ktype17, _vtype18,", "L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)'", "value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return", "len(self.logs)) for iter63 in self.logs: iter63.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return", "oprot.writeFieldBegin('logs', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.logs)) for iter63 in self.logs: iter63.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop()", "(4, TType.I32, 'status', None, None, ), # 4 (5, TType.STRING, 'name', 'UTF8', None,", "TType.STOP: break if fid == 1: if ftype == TType.STRING: self.level = iprot.readString().decode('utf-8',", "self.config = {} (_ktype1, _vtype2, _size0) = iprot.readMapBegin() for _i4 in range(_size0): _key5", "iprot.skip(ftype) elif fid == 4: if ftype == TType.I32: self.status = iprot.readI32() else:", "TType.STRING: self.level = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype)", "(4, TType.STRING, 'name', 'UTF8', None, ), # 4 (5, TType.STRING, 'url', 'UTF8', None,", "fid == 1: if ftype == TType.STRING: self.action = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "oprot.writeStructBegin('Execute') if self.action is not None: oprot.writeFieldBegin('action', TType.STRING, 1) oprot.writeString(self.action.encode('utf-8') if sys.version_info[0] ==", "return not (self == other) class Execute(object): \"\"\" Attributes: - action - request", "ftype == TType.MAP: self.headers = {} (_ktype10, _vtype11, _size9) = iprot.readMapBegin() for _i13", "kiter30, viter31 in self.headers.items(): oprot.writeString(kiter30.encode('utf-8') if sys.version_info[0] == 2 else kiter30) 
oprot.writeString(viter31.encode('utf-8') if", "else iprot.readString() self.config[_key5] = _val6 iprot.readMapEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def", "kiter34) oprot.writeString(viter35.encode('utf-8') if sys.version_info[0] == 2 else viter35) oprot.writeMapEnd() oprot.writeFieldEnd() if self.body is", "self.message is not None: oprot.writeFieldBegin('message', TType.STRING, 2) oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else", "ftype == TType.STOP: break if fid == 1: if ftype == TType.I64: self.routeId", "def __ne__(self, other): return not (self == other) class Event(object): \"\"\" Attributes: -", "ftype == TType.STRING: self.level = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.message = iprot.readString().decode('utf-8',", "\"\"\" Attributes: - statusCode - headers - body \"\"\" def __init__(self, statusCode=None, headers=None,", "for kiter32, viter33 in self.uriFragments.items(): oprot.writeString(kiter32.encode('utf-8') if sys.version_info[0] == 2 else kiter32) oprot.writeString(viter33.encode('utf-8')", "statusCode - headers - body \"\"\" def __init__(self, statusCode=None, headers=None, body=None,): self.statusCode =", "oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.config)) for kiter7, viter8 in self.config.items(): oprot.writeString(kiter7.encode('utf-8') if sys.version_info[0] == 2", "TType.STRUCT, 4) self.user.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L =", "oprot.writeFieldEnd() if self.email is not None: oprot.writeFieldBegin('email', TType.STRING, 6) oprot.writeString(self.email.encode('utf-8') if sys.version_info[0] ==", "other): return not (self == other) class HttpRequest(object): \"\"\" Attributes: - method -", 
"oprot.writeFieldBegin('scopes', TType.LIST, 7) oprot.writeListBegin(TType.STRING, len(self.scopes)) for iter48 in self.scopes: oprot.writeString(iter48.encode('utf-8') if sys.version_info[0] ==", "== 2 else kiter7) oprot.writeString(viter8.encode('utf-8') if sys.version_info[0] == 2 else viter8) oprot.writeMapEnd() oprot.writeFieldEnd()", "sys.version_info[0] == 2 else self.data) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self):", "4: if ftype == TType.I32: self.status = iprot.readI32() else: iprot.skip(ftype) elif fid ==", "all_structs.append(Event) Event.thrift_spec = ( None, # 0 (1, TType.STRING, 'eventName', 'UTF8', None, ),", "= Event() _elem55.read(iprot) self.events.append(_elem55) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype", "iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: self.body =", "not (self == other) class Event(object): \"\"\" Attributes: - eventName - data \"\"\"", "2 else iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING:", "iprot.skip(ftype) elif fid == 3: if ftype == TType.I64: self.categoryId = iprot.readI64() else:", "0 (1, TType.I32, 'statusCode', None, None, ), # 1 (2, TType.MAP, 'headers', (TType.STRING,", "def __ne__(self, other): return not (self == other) class HttpRequest(object): \"\"\" Attributes: -", "range(_size42): _elem47 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.parameters.append(_elem47) iprot.readListEnd()", "ftype == TType.STRUCT: self.rpc = RpcRequest() self.rpc.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd()", "1) oprot.writeI64(self.id) oprot.writeFieldEnd() if self.roleId is not None: oprot.writeFieldBegin('roleId', TType.I64, 2) oprot.writeI64(self.roleId) oprot.writeFieldEnd()", "'UTF8', None, ), # 2 ) all_structs.append(Execute) 
Execute.thrift_spec = ( None, # 0", "= http self.rpc = rpc def read(self, iprot): if iprot._fast_decode is not None", "viter72 in self.headers.items(): oprot.writeString(kiter71.encode('utf-8') if sys.version_info[0] == 2 else kiter71) oprot.writeString(viter72.encode('utf-8') if sys.version_info[0]", "_key21 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val22 = iprot.readString().decode('utf-8',", "2 else self.data) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L =", "ftype == TType.I64: self.routeId = iprot.readI64() else: iprot.skip(ftype) elif fid == 2: if", "self.parameters is not None: oprot.writeFieldBegin('parameters', TType.LIST, 8) oprot.writeListBegin(TType.STRING, len(self.parameters)) for iter49 in self.parameters:", "else iprot.readString() _val29 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.parameters[_key28]", "self.thrift_spec])) return oprot.writeStructBegin('Result') if self.response is not None: oprot.writeFieldBegin('response', TType.STRUCT, 1) self.response.write(oprot) oprot.writeFieldEnd()", "not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('HttpRequest') if", "_val22 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.MAP: self.parameters", "TType.STRING, 2) oprot.writeString(self.baseUrl.encode('utf-8') if sys.version_info[0] == 2 else self.baseUrl) oprot.writeFieldEnd() if self.app is", "oprot.writeFieldEnd() if self.request is not None: oprot.writeFieldBegin('request', TType.STRUCT, 2) self.request.write(oprot) oprot.writeFieldEnd() if self.context", "return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self", "oprot.writeFieldEnd() if self.events is not 
None: oprot.writeFieldBegin('events', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.events)) for iter62", "False), None, ), # 2 (3, TType.LIST, 'logs', (TType.STRUCT, [Log, None], False), None,", "Context() self.context.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode", "[self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Action') if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8')", "__ne__(self, other): return not (self == other) class Execute(object): \"\"\" Attributes: - action", "if self.request is not None: oprot.writeFieldBegin('request', TType.STRUCT, 2) self.request.write(oprot) oprot.writeFieldEnd() if self.context is", "'UTF8', None, ), # 1 (2, TType.STRING, 'data', 'UTF8', None, ), # 2", "is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Response') if self.statusCode is not None:", "ftype == TType.MAP: self.parameters = {} (_ktype24, _vtype25, _size23) = iprot.readMapBegin() for _i27", "if self.logs is not None: oprot.writeFieldBegin('logs', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.logs)) for iter63 in", "is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Connection')", "if ftype == TType.I32: self.statusCode = iprot.readI32() else: iprot.skip(ftype) elif fid == 2:", "== 2 else self.name) oprot.writeFieldEnd() if self.url is not None: oprot.writeFieldBegin('url', TType.STRING, 5)", "self.parameters: oprot.writeString(iter49.encode('utf-8') if sys.version_info[0] == 2 else iter49) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def", "== TType.I64: self.routeId = iprot.readI64() else: iprot.skip(ftype) elif fid == 2: if ftype", 
"1 (2, TType.I64, 'roleId', None, None, ), # 2 (3, TType.I64, 'categoryId', None,", "def __ne__(self, other): return not (self == other) class Execute(object): \"\"\" Attributes: -", "_val70 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.body", "7: if ftype == TType.LIST: self.scopes = [] (_etype39, _size36) = iprot.readListBegin() for", "def __init__(self, id=None, userId=None, status=None, name=None, url=None, appKey=None, scopes=None, parameters=None,): self.id = id", "iprot.skip(ftype) elif fid == 2: if ftype == TType.I64: self.roleId = iprot.readI64() else:", "TType.STRING: self.type = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype)", "return oprot.writeStructBegin('Action') if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0]", "oprot.writeFieldEnd() if self.appKey is not None: oprot.writeFieldBegin('appKey', TType.STRING, 6) oprot.writeString(self.appKey.encode('utf-8') if sys.version_info[0] ==", "TType.STRING: self.eventName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype)", "None, ), # 1 (2, TType.STRING, 'data', 'UTF8', None, ), # 2 )", "= ( None, # 0 (1, TType.STRING, 'name', 'UTF8', None, ), # 1", "self.logs is not None: oprot.writeFieldBegin('logs', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.logs)) for iter63 in self.logs:", "1) oprot.writeI32(self.statusCode) oprot.writeFieldEnd() if self.headers is not None: oprot.writeFieldBegin('headers', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING,", "not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Response') if self.statusCode is not None: oprot.writeFieldBegin('statusCode',", "oprot.writeFieldBegin('id', TType.I64, 1) 
oprot.writeI64(self.id) oprot.writeFieldEnd() if self.roleId is not None: oprot.writeFieldBegin('roleId', TType.I64, 2)", "self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class Request(object):", "None, ), # 1 (2, TType.STRUCT, 'request', [Request, None], None, ), # 2", "2 else self.body) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L =", "YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # # options", "self.uriFragments is not None: oprot.writeFieldBegin('uriFragments', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.uriFragments)) for kiter32, viter33", "None: oprot.writeFieldBegin('type', TType.STRING, 2) oprot.writeString(self.type.encode('utf-8') if sys.version_info[0] == 2 else self.type) oprot.writeFieldEnd() if", "7 (8, TType.LIST, 'parameters', (TType.STRING, 'UTF8', False), None, ), # 8 ) all_structs.append(User)", "== 1: if ftype == TType.I64: self.routeId = iprot.readI64() else: iprot.skip(ftype) elif fid", "_i60 in range(_size56): _elem61 = Log() _elem61.read(iprot) self.logs.append(_elem61) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype)", "is not None: oprot.writeFieldBegin('id', TType.I64, 1) oprot.writeI64(self.id) oprot.writeFieldEnd() if self.roleId is not None:", "- name - email - points \"\"\" def __init__(self, id=None, roleId=None, categoryId=None, status=None,", "2 else iprot.readString() else: iprot.skip(ftype) elif fid == 7: if ftype == TType.LIST:", "for _i46 in range(_size42): _elem47 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "(2, TType.I64, 'userId', None, None, ), # 2 (3, TType.I32, 'status', None, None,", "None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Request') if self.http", "== TType.STOP: break if fid == 1: if ftype == TType.I64: self.id 
=", "None, None, ), # 7 ) all_structs.append(Result) Result.thrift_spec = ( None, # 0", "return oprot.writeStructBegin('HttpRequest') if self.method is not None: oprot.writeFieldBegin('method', TType.STRING, 1) oprot.writeString(self.method.encode('utf-8') if sys.version_info[0]", "else: iprot.skip(ftype) elif fid == 2: if ftype == TType.MAP: self.headers = {}", "self.parameters.items(): oprot.writeString(kiter34.encode('utf-8') if sys.version_info[0] == 2 else kiter34) oprot.writeString(viter35.encode('utf-8') if sys.version_info[0] == 2", "self.points is not None: oprot.writeFieldBegin('points', TType.I32, 7) oprot.writeI32(self.points) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self):", "return oprot.writeStructBegin('Execute') if self.action is not None: oprot.writeFieldBegin('action', TType.STRING, 1) oprot.writeString(self.action.encode('utf-8') if sys.version_info[0]", "all_structs.append(RpcRequest) RpcRequest.thrift_spec = ( None, # 0 (1, TType.STRING, 'arguments', 'UTF8', None, ),", "== other) class Log(object): \"\"\" Attributes: - level - message \"\"\" def __init__(self,", "is not None: oprot.writeFieldBegin('routeId', TType.I64, 1) oprot.writeI64(self.routeId) oprot.writeFieldEnd() if self.baseUrl is not None:", "type - config \"\"\" def __init__(self, name=None, type=None, config=None,): self.name = name self.type", "self.headers = headers self.uriFragments = uriFragments self.parameters = parameters self.body = body def", "None, ), # 8 ) all_structs.append(User) User.thrift_spec = ( None, # 0 (1,", "TType.STRUCT: self.user = User() self.user.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self,", "TType.STRING, len(self.parameters)) for kiter34, viter35 in self.parameters.items(): oprot.writeString(kiter34.encode('utf-8') if sys.version_info[0] == 2 else", "and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: 
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return", "self.id is not None: oprot.writeFieldBegin('id', TType.I64, 1) oprot.writeI64(self.id) oprot.writeFieldEnd() if self.userId is not", "if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2", "== TType.STOP: break if fid == 1: if ftype == TType.STRING: self.method =", "2 ) all_structs.append(HttpRequest) HttpRequest.thrift_spec = ( None, # 0 (1, TType.STRING, 'method', 'UTF8',", "None, ), # 6 (7, TType.I32, 'points', None, None, ), # 7 )", "== 3: if ftype == TType.STRING: self.body = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "HttpRequest.thrift_spec = ( None, # 0 (1, TType.STRING, 'method', 'UTF8', None, ), #", "== TType.STRING: self.eventName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else:", "__init__(self, http=None, rpc=None,): self.http = http self.rpc = rpc def read(self, iprot): if", "ARE DOING # # options string: py # from thrift.Thrift import TType, TMessageType,", "iprot.readString() _val6 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.config[_key5] =", "oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Connection') if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1)", "iter63.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r'", "'UTF8', False), None, ), # 8 ) all_structs.append(User) User.thrift_spec = ( None, #", "self.message = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) else:", "2 else self.eventName) oprot.writeFieldEnd() if self.data is not None: 
oprot.writeFieldBegin('data', TType.STRING, 2) oprot.writeString(self.data.encode('utf-8')", "oprot.writeString(self.type.encode('utf-8') if sys.version_info[0] == 2 else self.type) oprot.writeFieldEnd() if self.config is not None:", "in range(_size36): _elem41 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.scopes.append(_elem41)", "== 2: if ftype == TType.I64: self.userId = iprot.readI64() else: iprot.skip(ftype) elif fid", "Attributes: - method - headers - uriFragments - parameters - body \"\"\" def", "), # 3 ) all_structs.append(Response) Response.thrift_spec = ( None, # 0 (1, TType.I32,", "other.__dict__ def __ne__(self, other): return not (self == other) class Action(object): \"\"\" Attributes:", "None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('RpcRequest') if self.arguments", "oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Request') if self.http is not None: oprot.writeFieldBegin('http', TType.STRUCT, 1)", "if ftype == TType.LIST: self.logs = [] (_etype59, _size56) = iprot.readListBegin() for _i60", "not None: oprot.writeFieldBegin('points', TType.I32, 7) oprot.writeI32(self.points) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def", "(TType.STRUCT, [Event, None], False), None, ), # 2 (3, TType.LIST, 'logs', (TType.STRUCT, [Log,", "== 2: if ftype == TType.STRING: self.data = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "not None: oprot.writeFieldBegin('name', TType.STRING, 5) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd()", "is not None: oprot.writeFieldBegin('type', TType.STRING, 2) oprot.writeString(self.type.encode('utf-8') if sys.version_info[0] == 2 else self.type)", 
"self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Execute') if self.action is not", "\"\"\" Attributes: - name - code \"\"\" def __init__(self, name=None, code=None,): self.name =", "# 2 (3, TType.STRUCT, 'context', [Context, None], None, ), # 3 ) all_structs.append(Request)", "appKey - scopes - parameters \"\"\" def __init__(self, id=None, userId=None, status=None, name=None, url=None,", "iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val29 = iprot.readString().decode('utf-8', errors='replace') if", "TType.STRING, 1) oprot.writeString(self.arguments.encode('utf-8') if sys.version_info[0] == 2 else self.arguments) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def", "), # 3 ) all_structs.append(Request) Request.thrift_spec = ( None, # 0 (1, TType.STRUCT,", "not (self == other) class HttpRequest(object): \"\"\" Attributes: - method - headers -", "oprot): if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__,", "not None: oprot.writeFieldBegin('routeId', TType.I64, 1) oprot.writeI64(self.routeId) oprot.writeFieldEnd() if self.baseUrl is not None: oprot.writeFieldBegin('baseUrl',", "ftype == TType.STRING: self.email = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('RpcRequest') if self.arguments is not None: oprot.writeFieldBegin('arguments', TType.STRING,", "iprot.skip(ftype) elif fid == 4: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8', errors='replace')", "else: iprot.skip(ftype) elif fid == 8: if ftype == TType.LIST: self.parameters = []", "= status self.name = name self.email = email self.points = points def read(self,", 
"[Log, None], False), None, ), # 3 ) all_structs.append(Response) Response.thrift_spec = ( None,", "None, # 0 (1, TType.STRUCT, 'response', [Response, None], None, ), # 1 (2,", "_elem55.read(iprot) self.events.append(_elem55) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST:", "None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Action') if self.name is not None: oprot.writeFieldBegin('name', TType.STRING,", "self.response.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.events =", "other) class Request(object): \"\"\" Attributes: - http - rpc \"\"\" def __init__(self, http=None,", "iprot.skip(ftype) elif fid == 4: if ftype == TType.STRUCT: self.user = User() self.user.read(iprot)", "== TType.STRING: self.url = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else:", "2 (3, TType.I64, 'categoryId', None, None, ), # 3 (4, TType.I32, 'status', None,", "TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.events)) for iter62 in self.events: iter62.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.logs", "iprot.readString() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.STRING: self.email =", "other.__dict__ def __ne__(self, other): return not (self == other) class Log(object): \"\"\" Attributes:", "ftype == TType.LIST: self.scopes = [] (_etype39, _size36) = iprot.readListBegin() for _i40 in", "None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('HttpRequest') if self.method is not None: oprot.writeFieldBegin('method', TType.STRING,", "self.scopes = [] (_etype39, _size36) = iprot.readListBegin() for _i40 in range(_size36): _elem41 =", "2 else iprot.readString() self.headers[_key14] = _val15 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 3:", "= response self.events = events self.logs = 
logs def read(self, iprot): if iprot._fast_decode", "- type - config \"\"\" def __init__(self, name=None, type=None, config=None,): self.name = name", "for _i54 in range(_size50): _elem55 = Event() _elem55.read(iprot) self.events.append(_elem55) iprot.readListEnd() else: iprot.skip(ftype) elif", "== 2: if ftype == TType.STRUCT: self.request = Request() self.request.read(iprot) else: iprot.skip(ftype) elif", "== other.__dict__ def __ne__(self, other): return not (self == other) class Execute(object): \"\"\"", "other) class Result(object): \"\"\" Attributes: - response - events - logs \"\"\" def", "5: if ftype == TType.STRING: self.url = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "(8, TType.LIST, 'parameters', (TType.STRING, 'UTF8', False), None, ), # 8 ) all_structs.append(User) User.thrift_spec", "(key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L))", "# 1 (2, TType.STRING, 'code', 'UTF8', None, ), # 2 ) all_structs.append(Execute) Execute.thrift_spec", "2 else kiter71) oprot.writeString(viter72.encode('utf-8') if sys.version_info[0] == 2 else viter72) oprot.writeMapEnd() oprot.writeFieldEnd() if", "== 3: if ftype == TType.STRUCT: self.app = App() self.app.read(iprot) else: iprot.skip(ftype) elif", "oprot.writeStructBegin('App') if self.id is not None: oprot.writeFieldBegin('id', TType.I64, 1) oprot.writeI64(self.id) oprot.writeFieldEnd() if self.userId", "_elem47 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.parameters.append(_elem47) iprot.readListEnd() else:", "= Request() self.request.read(iprot) else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRUCT:", "= ( None, # 0 (1, TType.STRUCT, 'response', [Response, None], None, ), #", "is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Result') if self.response is not None:", 
"== 2 else iprot.readString() _val29 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "( None, # 0 (1, TType.STRING, 'method', 'UTF8', None, ), # 1 (2,", "(_ktype10, _vtype11, _size9) = iprot.readMapBegin() for _i13 in range(_size9): _key14 = iprot.readString().decode('utf-8', errors='replace')", "(1, TType.BOOL, 'success', None, None, ), # 1 (2, TType.STRING, 'message', 'UTF8', None,", "= iprot.readI64() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.I64: self.userId", "break if fid == 1: if ftype == TType.STRING: self.action = iprot.readString().decode('utf-8', errors='replace')", "self.action = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif", "), # 1 (2, TType.STRING, 'code', 'UTF8', None, ), # 2 ) all_structs.append(Execute)", "if self.code is not None: oprot.writeFieldBegin('code', TType.STRING, 2) oprot.writeString(self.code.encode('utf-8') if sys.version_info[0] == 2", "== TType.STRING: self.appKey = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else:", "iprot.readI64() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.I64: self.roleId =", "iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: self.url = iprot.readString().decode('utf-8', errors='replace')", "oprot.writeFieldEnd() if self.user is not None: oprot.writeFieldBegin('user', TType.STRUCT, 4) self.user.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd()", "- status - name - url - appKey - scopes - parameters \"\"\"", "self.statusCode is not None: oprot.writeFieldBegin('statusCode', TType.I32, 1) oprot.writeI32(self.statusCode) oprot.writeFieldEnd() if self.headers is not", "[self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype", "if self.user is not None: oprot.writeFieldBegin('user', 
TType.STRUCT, 4) self.user.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def", "is not None: oprot.writeFieldBegin('body', TType.STRING, 5) oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] == 2 else self.body)", "if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 5: if", "self.data) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' %", "oprot.writeFieldBegin('type', TType.STRING, 2) oprot.writeString(self.type.encode('utf-8') if sys.version_info[0] == 2 else self.type) oprot.writeFieldEnd() if self.config", "- name - url - appKey - scopes - parameters \"\"\" def __init__(self,", "__ne__(self, other): return not (self == other) class Result(object): \"\"\" Attributes: - response", "self.config[_key5] = _val6 iprot.readMapEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot):", "% (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ ==", "TType.STOP: break if fid == 1: if ftype == TType.I64: self.routeId = iprot.readI64()", "self.categoryId = iprot.readI64() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.I32:", "3: if ftype == TType.I64: self.categoryId = iprot.readI64() else: iprot.skip(ftype) elif fid ==", "not None: oprot.writeFieldBegin('body', TType.STRING, 5) oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] == 2 else self.body) oprot.writeFieldEnd()", "fid == 2: if ftype == TType.STRING: self.code = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "0 (1, TType.STRING, 'method', 'UTF8', None, ), # 1 (2, TType.MAP, 'headers', (TType.STRING,", "iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) else: iprot.skip(ftype) 
iprot.readFieldEnd()", "from thrift.protocol.TProtocol import TProtocolException from thrift.TRecursive import fix_spec import sys from thrift.transport import", "not None: oprot.writeFieldBegin('uriFragments', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.uriFragments)) for kiter32, viter33 in self.uriFragments.items():", "else: iprot.skip(ftype) elif fid == 2: if ftype == TType.I64: self.userId = iprot.readI64()", "self.http = HttpRequest() self.http.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype ==", "# 1 (2, TType.STRING, 'type', 'UTF8', None, ), # 2 (3, TType.MAP, 'config',", "iprot.readString() _val70 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.headers[_key69] =", "== TType.I32: self.points = iprot.readI32() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self,", "TException, TApplicationException from thrift.protocol.TProtocol import TProtocolException from thrift.TRecursive import fix_spec import sys from", "# 2 (3, TType.I64, 'categoryId', None, None, ), # 3 (4, TType.I32, 'status',", "ftype == TType.LIST: self.events = [] (_etype53, _size50) = iprot.readListBegin() for _i54 in", "TType.I64: self.roleId = iprot.readI64() else: iprot.skip(ftype) elif fid == 3: if ftype ==", "== 7: if ftype == TType.I32: self.points = iprot.readI32() else: iprot.skip(ftype) else: iprot.skip(ftype)", "scopes self.parameters = parameters def read(self, iprot): if iprot._fast_decode is not None and", "other) class User(object): \"\"\" Attributes: - id - roleId - categoryId - status", "iprot.readBool() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.message =", "other): return not (self == other) class Result(object): \"\"\" Attributes: - response -", "TType.I32, 'status', None, None, ), # 3 (4, TType.STRING, 'name', 'UTF8', None, ),", "2 else kiter7) oprot.writeString(viter8.encode('utf-8') if 
sys.version_info[0] == 2 else viter8) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop()", "is not None: oprot.writeFieldBegin('config', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.config)) for kiter7, viter8 in", "'data', 'UTF8', None, ), # 2 ) all_structs.append(Log) Log.thrift_spec = ( None, #", "self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class HttpRequest(object):", "for kiter71, viter72 in self.headers.items(): oprot.writeString(kiter71.encode('utf-8') if sys.version_info[0] == 2 else kiter71) oprot.writeString(viter72.encode('utf-8')", "ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.eventName", "iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.MAP: self.config =", "not None: oprot.writeFieldBegin('userId', TType.I64, 2) oprot.writeI64(self.userId) oprot.writeFieldEnd() if self.status is not None: oprot.writeFieldBegin('status',", "HttpRequest() self.http.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.rpc", "= ( None, # 0 (1, TType.STRING, 'arguments', 'UTF8', None, ), # 1", "and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Connection') if self.name is", "self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Context') if self.routeId is not", "None, ), # 1 (2, TType.STRING, 'type', 'UTF8', None, ), # 2 (3,", "= iprot.readI64() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.I32: self.status", "TType.STRUCT, 3) self.context.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L =", "= iprot.readListBegin() for _i40 in range(_size36): _elem41 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "4: if 
ftype == TType.STRUCT: self.user = User() self.user.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype)", "fid == 5: if ftype == TType.STRING: self.url = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "'uriFragments', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3 (4, TType.MAP, 'parameters',", "# 8 ) all_structs.append(User) User.thrift_spec = ( None, # 0 (1, TType.I64, 'id',", "else iprot.readString() _val6 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.config[_key5]", "not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname, ftype, fid)", "oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.type", "= action self.request = request self.context = context def read(self, iprot): if iprot._fast_decode", "__ne__(self, other): return not (self == other) class RpcRequest(object): \"\"\" Attributes: - arguments", "), # 2 ) all_structs.append(Connection) Connection.thrift_spec = ( None, # 0 (1, TType.STRING,", "2 else iprot.readString() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING:", "2) oprot.writeI64(self.roleId) oprot.writeFieldEnd() if self.categoryId is not None: oprot.writeFieldBegin('categoryId', TType.I64, 3) oprot.writeI64(self.categoryId) oprot.writeFieldEnd()", "(1, TType.I64, 'id', None, None, ), # 1 (2, TType.I64, 'userId', None, None,", "iprot.skip(ftype) elif fid == 3: if ftype == TType.MAP: self.uriFragments = {} (_ktype17,", "thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException from thrift.protocol.TProtocol import TProtocolException from thrift.TRecursive", "else self.action) oprot.writeFieldEnd() if self.request is not None: oprot.writeFieldBegin('request', TType.STRUCT, 2) 
self.request.write(oprot) oprot.writeFieldEnd()", "\"\"\" def __init__(self, method=None, headers=None, uriFragments=None, parameters=None, body=None,): self.method = method self.headers =", "None], None, ), # 1 (2, TType.STRUCT, 'rpc', [RpcRequest, None], None, ), #", "iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.baseUrl = iprot.readString().decode('utf-8', errors='replace')", "not (self == other) all_structs.append(Message) Message.thrift_spec = ( None, # 0 (1, TType.BOOL,", "'baseUrl', 'UTF8', None, ), # 2 (3, TType.STRUCT, 'app', [App, None], None, ),", "1 (2, TType.STRING, 'baseUrl', 'UTF8', None, ), # 2 (3, TType.STRUCT, 'app', [App,", "not None: oprot.writeFieldBegin('scopes', TType.LIST, 7) oprot.writeListBegin(TType.STRING, len(self.scopes)) for iter48 in self.scopes: oprot.writeString(iter48.encode('utf-8') if", "else iter48) oprot.writeListEnd() oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.LIST, 8) oprot.writeListBegin(TType.STRING,", "if self.headers is not None: oprot.writeFieldBegin('headers', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers)) for kiter30,", "None, ), # 7 (8, TType.LIST, 'parameters', (TType.STRING, 'UTF8', False), None, ), #", "oprot.writeString(kiter32.encode('utf-8') if sys.version_info[0] == 2 else kiter32) oprot.writeString(viter33.encode('utf-8') if sys.version_info[0] == 2 else", "self.headers = {} (_ktype10, _vtype11, _size9) = iprot.readMapBegin() for _i13 in range(_size9): _key14", "self.url is not None: oprot.writeFieldBegin('url', TType.STRING, 5) oprot.writeString(self.url.encode('utf-8') if sys.version_info[0] == 2 else", "(_etype59, _size56) = iprot.readListBegin() for _i60 in range(_size56): _elem61 = Log() _elem61.read(iprot) self.logs.append(_elem61)", "== 1: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "0 (1, TType.STRING, 'eventName', 'UTF8', 
None, ), # 1 (2, TType.STRING, 'data', 'UTF8',", "\"\"\" def __init__(self, id=None, userId=None, status=None, name=None, url=None, appKey=None, scopes=None, parameters=None,): self.id =", "self.body = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) else:", "TType.STRING, 1) oprot.writeString(self.eventName.encode('utf-8') if sys.version_info[0] == 2 else self.eventName) oprot.writeFieldEnd() if self.data is", "parameters=None, body=None,): self.method = method self.headers = headers self.uriFragments = uriFragments self.parameters =", "oprot.writeFieldBegin('eventName', TType.STRING, 1) oprot.writeString(self.eventName.encode('utf-8') if sys.version_info[0] == 2 else self.eventName) oprot.writeFieldEnd() if self.data", "== 1: if ftype == TType.BOOL: self.success = iprot.readBool() else: iprot.skip(ftype) elif fid", "if self.headers is not None: oprot.writeFieldBegin('headers', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers)) for kiter71,", "oprot.writeFieldEnd() if self.app is not None: oprot.writeFieldBegin('app', TType.STRUCT, 3) self.app.write(oprot) oprot.writeFieldEnd() if self.user", "= ( None, # 0 (1, TType.STRUCT, 'http', [HttpRequest, None], None, ), #", "TType.STOP: break if fid == 1: if ftype == TType.STRING: self.arguments = iprot.readString().decode('utf-8',", "3 (4, TType.STRUCT, 'user', [User, None], None, ), # 4 ) all_structs.append(App) App.thrift_spec", "_i13 in range(_size9): _key14 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "= logs def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport)", "fid == 1: if ftype == TType.STRUCT: self.response = Response() self.response.read(iprot) else: iprot.skip(ftype)", "1 (2, TType.STRUCT, 'request', [Request, None], None, ), # 2 (3, TType.STRUCT, 'context',", "not None: 
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('App') if self.id is not None: oprot.writeFieldBegin('id',", "= iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.uriFragments[_key21] = _val22 iprot.readMapEnd()", "2 else iprot.readString() self.config[_key5] = _val6 iprot.readMapEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd()", "None], False), None, ), # 3 ) all_structs.append(Response) Response.thrift_spec = ( None, #", "( None, # 0 (1, TType.STRING, 'action', 'UTF8', None, ), # 1 (2,", "iprot.readI32() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.MAP: self.headers =", "def __init__(self, level=None, message=None,): self.level = level self.message = message def read(self, iprot):", "7 ) all_structs.append(Result) Result.thrift_spec = ( None, # 0 (1, TType.STRUCT, 'response', [Response,", "kiter32) oprot.writeString(viter33.encode('utf-8') if sys.version_info[0] == 2 else viter33) oprot.writeMapEnd() oprot.writeFieldEnd() if self.parameters is", "self.name) oprot.writeFieldEnd() if self.code is not None: oprot.writeFieldBegin('code', TType.STRING, 2) oprot.writeString(self.code.encode('utf-8') if sys.version_info[0]", "oprot.writeMapEnd() oprot.writeFieldEnd() if self.uriFragments is not None: oprot.writeFieldBegin('uriFragments', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.uriFragments))", "), # 2 (3, TType.STRUCT, 'context', [Context, None], None, ), # 3 )", "method=None, headers=None, uriFragments=None, parameters=None, body=None,): self.method = method self.headers = headers self.uriFragments =", "iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self,", "else: iprot.skip(ftype) elif fid == 7: if ftype == TType.LIST: self.scopes = []", "None: 
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Log') if self.level is not None: oprot.writeFieldBegin('level', TType.STRING,", "oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('App') if self.id is not None: oprot.writeFieldBegin('id', TType.I64, 1)", "(self == other) class App(object): \"\"\" Attributes: - id - userId - status", "== TType.I32: self.status = iprot.readI32() else: iprot.skip(ftype) elif fid == 4: if ftype", "if ftype == TType.STRUCT: self.request = Request() self.request.read(iprot) else: iprot.skip(ftype) elif fid ==", "# 2 (3, TType.MAP, 'uriFragments', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), #", "3) oprot.writeI64(self.categoryId) oprot.writeFieldEnd() if self.status is not None: oprot.writeFieldBegin('status', TType.I32, 4) oprot.writeI32(self.status) oprot.writeFieldEnd()", "elif fid == 2: if ftype == TType.LIST: self.events = [] (_etype53, _size50)", "), # 1 (2, TType.MAP, 'headers', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ),", "TType.LIST, 'scopes', (TType.STRING, 'UTF8', False), None, ), # 7 (8, TType.LIST, 'parameters', (TType.STRING,", "6: if ftype == TType.STRING: self.appKey = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "iter62 in self.events: iter62.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.logs is not None: oprot.writeFieldBegin('logs', TType.LIST,", "TType.I32, 'status', None, None, ), # 4 (5, TType.STRING, 'name', 'UTF8', None, ),", "for kiter34, viter35 in self.parameters.items(): oprot.writeString(kiter34.encode('utf-8') if sys.version_info[0] == 2 else kiter34) oprot.writeString(viter35.encode('utf-8')", "return not (self == other) class Log(object): \"\"\" Attributes: - level - message", "oprot.writeFieldBegin('routeId', TType.I64, 1) oprot.writeI64(self.routeId) oprot.writeFieldEnd() if self.baseUrl is not 
None: oprot.writeFieldBegin('baseUrl', TType.STRING, 2)", "self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) return iprot.readStructBegin() while True: (fname,", "), # 3 (4, TType.STRING, 'name', 'UTF8', None, ), # 4 (5, TType.STRING,", "oprot.writeFieldBegin('parameters', TType.MAP, 4) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) for kiter34, viter35 in self.parameters.items(): oprot.writeString(kiter34.encode('utf-8') if", "2: if ftype == TType.STRING: self.message = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "self.appKey is not None: oprot.writeFieldBegin('appKey', TType.STRING, 6) oprot.writeString(self.appKey.encode('utf-8') if sys.version_info[0] == 2 else", "for iter62 in self.events: iter62.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.logs is not None: oprot.writeFieldBegin('logs',", "- eventName - data \"\"\" def __init__(self, eventName=None, data=None,): self.eventName = eventName self.data", "2 (3, TType.LIST, 'logs', (TType.STRUCT, [Log, None], False), None, ), # 3 )", "Context(object): \"\"\" Attributes: - routeId - baseUrl - app - user \"\"\" def", "oprot.writeFieldBegin('status', TType.I32, 3) oprot.writeI32(self.status) oprot.writeFieldEnd() if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 4)", "(1, TType.STRING, 'name', 'UTF8', None, ), # 1 (2, TType.STRING, 'type', 'UTF8', None,", "self.url = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif", "is not None: oprot.writeFieldBegin('status', TType.I32, 3) oprot.writeI32(self.status) oprot.writeFieldEnd() if self.name is not None:", "'name', 'UTF8', None, ), # 4 (5, TType.STRING, 'url', 'UTF8', None, ), #", "def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()]", "oprot.trans.write(oprot._fast_encode(self, [self.__class__, 
self.thrift_spec])) return oprot.writeStructBegin('Message') if self.success is not None: oprot.writeFieldBegin('success', TType.BOOL, 1)", "sys.version_info[0] == 2 else self.eventName) oprot.writeFieldEnd() if self.data is not None: oprot.writeFieldBegin('data', TType.STRING,", "sys.version_info[0] == 2 else iprot.readString() _val22 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "3) oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] == 2 else self.body) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self):", "- method - headers - uriFragments - parameters - body \"\"\" def __init__(self,", "# 0 (1, TType.BOOL, 'success', None, None, ), # 1 (2, TType.STRING, 'message',", "and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('HttpRequest') if self.method is", "sys.version_info[0] == 2 else kiter71) oprot.writeString(viter72.encode('utf-8') if sys.version_info[0] == 2 else viter72) oprot.writeMapEnd()", "ftype == TType.STRING: self.data = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "oprot.writeI32(self.points) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' %", "if self.data is not None: oprot.writeFieldBegin('data', TType.STRING, 2) oprot.writeString(self.data.encode('utf-8') if sys.version_info[0] == 2", "'status', None, None, ), # 3 (4, TType.STRING, 'name', 'UTF8', None, ), #", "sys.version_info[0] == 2 else self.message) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self):", "oprot.writeString(iter49.encode('utf-8') if sys.version_info[0] == 2 else iter49) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self):", "TType.STRING, 
'UTF8', False), None, ), # 3 (4, TType.MAP, 'parameters', (TType.STRING, 'UTF8', TType.STRING,", "not None: oprot.writeFieldBegin('message', TType.STRING, 2) oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message) oprot.writeFieldEnd()", "def __ne__(self, other): return not (self == other) class Action(object): \"\"\" Attributes: -", "if sys.version_info[0] == 2 else kiter34) oprot.writeString(viter35.encode('utf-8') if sys.version_info[0] == 2 else viter35)", "= appKey self.scopes = scopes self.parameters = parameters def read(self, iprot): if iprot._fast_decode", "== 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 7: if ftype ==", "oprot.writeFieldBegin('statusCode', TType.I32, 1) oprot.writeI32(self.statusCode) oprot.writeFieldEnd() if self.headers is not None: oprot.writeFieldBegin('headers', TType.MAP, 2)", "def __ne__(self, other): return not (self == other) class User(object): \"\"\" Attributes: -", "context \"\"\" def __init__(self, action=None, request=None, context=None,): self.action = action self.request = request", "2: if ftype == TType.MAP: self.headers = {} (_ktype10, _vtype11, _size9) = iprot.readMapBegin()", "if self.arguments is not None: oprot.writeFieldBegin('arguments', TType.STRING, 1) oprot.writeString(self.arguments.encode('utf-8') if sys.version_info[0] == 2", "( None, # 0 (1, TType.I32, 'statusCode', None, None, ), # 1 (2,", "oprot.writeFieldEnd() if self.config is not None: oprot.writeFieldBegin('config', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.config)) for", "iprot.skip(ftype) elif fid == 3: if ftype == TType.MAP: self.config = {} (_ktype1,", "TType.LIST: self.events = [] (_etype53, _size50) = iprot.readListBegin() for _i54 in range(_size50): _elem55", "elif fid == 2: if ftype == TType.I64: self.roleId = iprot.readI64() else: iprot.skip(ftype)", "\"\"\" def __init__(self, name=None, type=None, config=None,): self.name = name self.type = type self.config", 
"= iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.parameters.append(_elem47) iprot.readListEnd() else: iprot.skip(ftype)", "fid == 3: if ftype == TType.STRUCT: self.context = Context() self.context.read(iprot) else: iprot.skip(ftype)", "2) oprot.writeString(self.code.encode('utf-8') if sys.version_info[0] == 2 else self.code) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self):", "is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Context')", "else self.name) oprot.writeFieldEnd() if self.url is not None: oprot.writeFieldBegin('url', TType.STRING, 5) oprot.writeString(self.url.encode('utf-8') if", "success - message \"\"\" def __init__(self, success=None, message=None,): self.success = success self.message =", "self.url = url self.appKey = appKey self.scopes = scopes self.parameters = parameters def", "None, # 0 (1, TType.I32, 'statusCode', None, None, ), # 1 (2, TType.MAP,", "return not (self == other) all_structs.append(Message) Message.thrift_spec = ( None, # 0 (1,", "Thrift Compiler (0.14.2) # # DO NOT EDIT UNLESS YOU ARE SURE THAT", "iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.data =", "self.request = Request() self.request.read(iprot) else: iprot.skip(ftype) elif fid == 3: if ftype ==", "action self.request = request self.context = context def read(self, iprot): if iprot._fast_decode is", "2 ) all_structs.append(Log) Log.thrift_spec = ( None, # 0 (1, TType.STRING, 'level', 'UTF8',", "ftype == TType.I64: self.roleId = iprot.readI64() else: iprot.skip(ftype) elif fid == 3: if", "if self.baseUrl is not None: oprot.writeFieldBegin('baseUrl', TType.STRING, 2) oprot.writeString(self.baseUrl.encode('utf-8') if sys.version_info[0] == 2", "break if fid == 1: if ftype == TType.I64: self.routeId = iprot.readI64() else:", "not 
None: oprot.writeFieldBegin('events', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.events)) for iter62 in self.events: iter62.write(oprot) oprot.writeListEnd()", "sys.version_info[0] == 2 else self.level) oprot.writeFieldEnd() if self.message is not None: oprot.writeFieldBegin('message', TType.STRING,", "viter33) oprot.writeMapEnd() oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 4) oprot.writeMapBegin(TType.STRING, TType.STRING,", "ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.arguments", "- success - message \"\"\" def __init__(self, success=None, message=None,): self.success = success self.message", "# 4 (5, TType.STRING, 'url', 'UTF8', None, ), # 5 (6, TType.STRING, 'appKey',", "if ftype == TType.STRING: self.email = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "self.request.write(oprot) oprot.writeFieldEnd() if self.context is not None: oprot.writeFieldBegin('context', TType.STRUCT, 3) self.context.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop()", "if sys.version_info[0] == 2 else self.type) oprot.writeFieldEnd() if self.config is not None: oprot.writeFieldBegin('config',", "oprot.writeFieldEnd() if self.status is not None: oprot.writeFieldBegin('status', TType.I32, 4) oprot.writeI32(self.status) oprot.writeFieldEnd() if self.name", "TType.STRING, 'UTF8', False), None, ), # 4 (5, TType.STRING, 'body', 'UTF8', None, ),", "= iprot.readMapBegin() for _i68 in range(_size64): _key69 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "self.request.read(iprot) else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRUCT: self.context =", "events - logs \"\"\" def __init__(self, response=None, events=None, logs=None,): self.response = response self.events", "- email - points \"\"\" def __init__(self, id=None, roleId=None, categoryId=None, status=None, name=None, email=None,", "2 else 
iprot.readString() _val70 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "'type', 'UTF8', None, ), # 2 (3, TType.MAP, 'config', (TType.STRING, 'UTF8', TType.STRING, 'UTF8',", "), # 1 (2, TType.I64, 'userId', None, None, ), # 2 (3, TType.I32,", "TType, TMessageType, TFrozenDict, TException, TApplicationException from thrift.protocol.TProtocol import TProtocolException from thrift.TRecursive import fix_spec", "TType.STRUCT, 2) self.rpc.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L =", "if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 5) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2", "sys.version_info[0] == 2 else self.url) oprot.writeFieldEnd() if self.appKey is not None: oprot.writeFieldBegin('appKey', TType.STRING,", "def write(self, oprot): if oprot._fast_encode is not None and self.thrift_spec is not None:", "1: if ftype == TType.STRING: self.action = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "self.status = iprot.readI32() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRING:", "== TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.http =", "= iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid", "oprot.writeString(self.level.encode('utf-8') if sys.version_info[0] == 2 else self.level) oprot.writeFieldEnd() if self.message is not None:", "class HttpRequest(object): \"\"\" Attributes: - method - headers - uriFragments - parameters -", "self.type = type self.config = config def read(self, iprot): if iprot._fast_decode is not", "TType.STRING: self.url = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype)", "sys.version_info[0] == 2 else 
iprot.readString() else: iprot.skip(ftype) elif fid == 6: if ftype", "other.__dict__ def __ne__(self, other): return not (self == other) class User(object): \"\"\" Attributes:", "not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Log') if self.level is not None: oprot.writeFieldBegin('level',", "arguments=None,): self.arguments = arguments def read(self, iprot): if iprot._fast_decode is not None and", "UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # #", "_vtype2, _size0) = iprot.readMapBegin() for _i4 in range(_size0): _key5 = iprot.readString().decode('utf-8', errors='replace') if", "3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.config)) for kiter7, viter8 in self.config.items(): oprot.writeString(kiter7.encode('utf-8') if sys.version_info[0] ==", "== TType.BOOL: self.success = iprot.readBool() else: iprot.skip(ftype) elif fid == 2: if ftype", "== 1: if ftype == TType.STRING: self.action = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "self.context = Context() self.context.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot):", "level self.message = message def read(self, iprot): if iprot._fast_decode is not None and", "else: iprot.skip(ftype) elif fid == 3: if ftype == TType.MAP: self.config = {}", "- parameters - body \"\"\" def __init__(self, method=None, headers=None, uriFragments=None, parameters=None, body=None,): self.method", "else viter35) oprot.writeMapEnd() oprot.writeFieldEnd() if self.body is not None: oprot.writeFieldBegin('body', TType.STRING, 5) oprot.writeString(self.body.encode('utf-8')", "5: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "iprot.readString() self.scopes.append(_elem41) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 8: if ftype == TType.LIST:", 
"TType.STRING, 1) oprot.writeString(self.method.encode('utf-8') if sys.version_info[0] == 2 else self.method) oprot.writeFieldEnd() if self.headers is", "oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('User') if self.id is not None: oprot.writeFieldBegin('id', TType.I64, 1)", "'logs', (TType.STRUCT, [Log, None], False), None, ), # 3 ) all_structs.append(Response) Response.thrift_spec =", "# 0 (1, TType.I64, 'routeId', None, None, ), # 1 (2, TType.STRING, 'baseUrl',", "iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.events = [] (_etype53,", "Response() self.response.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.events", "is not None: oprot.writeFieldBegin('name', TType.STRING, 4) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)", "sys.version_info[0] == 2 else kiter7) oprot.writeString(viter8.encode('utf-8') if sys.version_info[0] == 2 else viter8) oprot.writeMapEnd()", "is not None: oprot.writeFieldBegin('data', TType.STRING, 2) oprot.writeString(self.data.encode('utf-8') if sys.version_info[0] == 2 else self.data)", "iprot.readString() else: iprot.skip(ftype) elif fid == 7: if ftype == TType.LIST: self.scopes =", "if ftype == TType.STRING: self.data = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "sys.version_info[0] == 2 else kiter34) oprot.writeString(viter35.encode('utf-8') if sys.version_info[0] == 2 else viter35) oprot.writeMapEnd()", "id - roleId - categoryId - status - name - email - points", "Event() _elem55.read(iprot) self.events.append(_elem55) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype ==", "if fid == 1: if ftype == TType.I32: self.statusCode = iprot.readI32() else: iprot.skip(ftype)", "= {} (_ktype65, _vtype66, _size64) = iprot.readMapBegin() for _i68 in range(_size64): _key69 =", "3: if ftype == TType.STRUCT: self.app = App() 
self.app.read(iprot) else: iprot.skip(ftype) elif fid", "== TType.LIST: self.events = [] (_etype53, _size50) = iprot.readListBegin() for _i54 in range(_size50):", "not None: oprot.writeFieldBegin('body', TType.STRING, 3) oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] == 2 else self.body) oprot.writeFieldEnd()", "None: oprot.writeFieldBegin('success', TType.BOOL, 1) oprot.writeBool(self.success) oprot.writeFieldEnd() if self.message is not None: oprot.writeFieldBegin('message', TType.STRING,", "# 1 (2, TType.STRUCT, 'rpc', [RpcRequest, None], None, ), # 2 ) all_structs.append(HttpRequest)", "eventName - data \"\"\" def __init__(self, eventName=None, data=None,): self.eventName = eventName self.data =", "( None, # 0 (1, TType.I64, 'id', None, None, ), # 1 (2,", "other): return not (self == other) class App(object): \"\"\" Attributes: - id -", "oprot.writeListBegin(TType.STRUCT, len(self.logs)) for iter63 in self.logs: iter63.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self):", "self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class Action(object):", "iprot.readI64() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.I32: self.status =", "None: oprot.writeFieldBegin('method', TType.STRING, 1) oprot.writeString(self.method.encode('utf-8') if sys.version_info[0] == 2 else self.method) oprot.writeFieldEnd() if", "self.data = data def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans,", "4 (5, TType.STRING, 'name', 'UTF8', None, ), # 5 (6, TType.STRING, 'email', 'UTF8',", "__init__(self, response=None, events=None, logs=None,): self.response = response self.events = events self.logs = logs", "if self.roleId is not None: oprot.writeFieldBegin('roleId', TType.I64, 2) oprot.writeI64(self.roleId) oprot.writeFieldEnd() if self.categoryId is", "self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, 
[self.__class__, self.thrift_spec])) return oprot.writeStructBegin('User') if self.id is not", "), # 6 (7, TType.LIST, 'scopes', (TType.STRING, 'UTF8', False), None, ), # 7", "== other.__dict__ def __ne__(self, other): return not (self == other) class Context(object): \"\"\"", "None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Result') if self.response", "== 2 else iprot.readString() self.parameters[_key28] = _val29 iprot.readMapEnd() else: iprot.skip(ftype) elif fid ==", "self.type = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif", "- name - code \"\"\" def __init__(self, name=None, code=None,): self.name = name self.code", "not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Request') if", "= [] (_etype39, _size36) = iprot.readListBegin() for _i40 in range(_size36): _elem41 = iprot.readString().decode('utf-8',", "ftype == TType.STOP: break if fid == 1: if ftype == TType.BOOL: self.success", "Result(object): \"\"\" Attributes: - response - events - logs \"\"\" def __init__(self, response=None,", "None: oprot.writeFieldBegin('id', TType.I64, 1) oprot.writeI64(self.id) oprot.writeFieldEnd() if self.userId is not None: oprot.writeFieldBegin('userId', TType.I64,", ") all_structs.append(App) App.thrift_spec = ( None, # 0 (1, TType.I64, 'id', None, None,", "if self.type is not None: oprot.writeFieldBegin('type', TType.STRING, 2) oprot.writeString(self.type.encode('utf-8') if sys.version_info[0] == 2", "_size36) = iprot.readListBegin() for _i40 in range(_size36): _elem41 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "self.arguments) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = 
['%s=%r' %", "if sys.version_info[0] == 2 else self.method) oprot.writeFieldEnd() if self.headers is not None: oprot.writeFieldBegin('headers',", "is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Log')", "TType.STRING, len(self.uriFragments)) for kiter32, viter33 in self.uriFragments.items(): oprot.writeString(kiter32.encode('utf-8') if sys.version_info[0] == 2 else", "# 1 (2, TType.STRING, 'message', 'UTF8', None, ), # 2 ) all_structs.append(Connection) Connection.thrift_spec", "), # 4 ) all_structs.append(App) App.thrift_spec = ( None, # 0 (1, TType.I64,", "TType.LIST, 'logs', (TType.STRUCT, [Log, None], False), None, ), # 3 ) all_structs.append(Response) Response.thrift_spec", "- code \"\"\" def __init__(self, name=None, code=None,): self.name = name self.code = code", "= url self.appKey = appKey self.scopes = scopes self.parameters = parameters def read(self,", "elif fid == 5: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8', errors='replace') if", "body def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and", "[self.__class__, self.thrift_spec])) return oprot.writeStructBegin('App') if self.id is not None: oprot.writeFieldBegin('id', TType.I64, 1) oprot.writeI64(self.id)", "__init__(self, method=None, headers=None, uriFragments=None, parameters=None, body=None,): self.method = method self.headers = headers self.uriFragments", "= iprot.readI32() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRING: self.name", "self.name) oprot.writeFieldEnd() if self.url is not None: oprot.writeFieldBegin('url', TType.STRING, 5) oprot.writeString(self.url.encode('utf-8') if sys.version_info[0]", "= _val29 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING:", "TType.STRING, 'data', 'UTF8', None, ), # 2 ) all_structs.append(Log) Log.thrift_spec 
= ( None,", "self.arguments = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) else:", "return not (self == other) class User(object): \"\"\" Attributes: - id - roleId", "self.logs.append(_elem61) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode", "(_ktype24, _vtype25, _size23) = iprot.readMapBegin() for _i27 in range(_size23): _key28 = iprot.readString().decode('utf-8', errors='replace')", "if ftype == TType.STOP: break if fid == 1: if ftype == TType.BOOL:", "self.routeId = iprot.readI64() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING:", "for _i68 in range(_size64): _key69 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "method self.headers = headers self.uriFragments = uriFragments self.parameters = parameters self.body = body", "sys.version_info[0] == 2 else iprot.readString() _val15 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "not None: oprot.writeFieldBegin('request', TType.STRUCT, 2) self.request.write(oprot) oprot.writeFieldEnd() if self.context is not None: oprot.writeFieldBegin('context',", "baseUrl - app - user \"\"\" def __init__(self, routeId=None, baseUrl=None, app=None, user=None,): self.routeId", "TType.STRING: self.arguments = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype)", "fid == 7: if ftype == TType.LIST: self.scopes = [] (_etype39, _size36) =", "range(_size9): _key14 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val15 =", "'eventName', 'UTF8', None, ), # 1 (2, TType.STRING, 'data', 'UTF8', None, ), #", "'UTF8', None, ), # 6 (7, TType.I32, 'points', None, None, ), # 7", "if sys.version_info[0] == 2 else 
iprot.readString() else: iprot.skip(ftype) elif fid == 7: if", "len(self.events)) for iter62 in self.events: iter62.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.logs is not None:", "(5, TType.STRING, 'url', 'UTF8', None, ), # 5 (6, TType.STRING, 'appKey', 'UTF8', None,", "type self.config = config def read(self, iprot): if iprot._fast_decode is not None and", "TType.STRING: self.appKey = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype)", "oprot.writeFieldEnd() if self.rpc is not None: oprot.writeFieldBegin('rpc', TType.STRUCT, 2) self.rpc.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd()", "self.app.write(oprot) oprot.writeFieldEnd() if self.user is not None: oprot.writeFieldBegin('user', TType.STRUCT, 4) self.user.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop()", "class Request(object): \"\"\" Attributes: - http - rpc \"\"\" def __init__(self, http=None, rpc=None,):", "1 (2, TType.STRUCT, 'rpc', [RpcRequest, None], None, ), # 2 ) all_structs.append(HttpRequest) HttpRequest.thrift_spec", "TType.I64, 2) oprot.writeI64(self.userId) oprot.writeFieldEnd() if self.status is not None: oprot.writeFieldBegin('status', TType.I32, 3) oprot.writeI32(self.status)", "self.context is not None: oprot.writeFieldBegin('context', TType.STRUCT, 3) self.context.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self):", "== TType.STRUCT: self.context = Context() self.context.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def", "= message def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport)", "self.method) oprot.writeFieldEnd() if self.headers is not None: oprot.writeFieldBegin('headers', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers))", "other) class 
HttpRequest(object): \"\"\" Attributes: - method - headers - uriFragments - parameters", "self.type) oprot.writeFieldEnd() if self.config is not None: oprot.writeFieldBegin('config', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.config))", "elif fid == 8: if ftype == TType.LIST: self.parameters = [] (_etype45, _size42)", "self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) all_structs.append(Message) Message.thrift_spec", "sys.version_info[0] == 2 else kiter30) oprot.writeString(viter31.encode('utf-8') if sys.version_info[0] == 2 else viter31) oprot.writeMapEnd()", "= iprot.readMapBegin() for _i20 in range(_size16): _key21 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "in range(_size16): _key21 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val22", "None, ), # 5 ) all_structs.append(RpcRequest) RpcRequest.thrift_spec = ( None, # 0 (1,", "'id', None, None, ), # 1 (2, TType.I64, 'userId', None, None, ), #", "self.events = [] (_etype53, _size50) = iprot.readListBegin() for _i54 in range(_size50): _elem55 =", "== 2 else viter33) oprot.writeMapEnd() oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP,", "ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.http", "TType.STRUCT, 1) self.http.write(oprot) oprot.writeFieldEnd() if self.rpc is not None: oprot.writeFieldBegin('rpc', TType.STRUCT, 2) self.rpc.write(oprot)", "message \"\"\" def __init__(self, level=None, message=None,): self.level = level self.message = message def", "if fid == 1: if ftype == TType.STRING: self.action = iprot.readString().decode('utf-8', errors='replace') if", "TType.STRING, 5) oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] == 2 else self.body) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def", "None, ), # 3 (4, TType.MAP, 
'parameters', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None,", "== 7: if ftype == TType.LIST: self.scopes = [] (_etype39, _size36) = iprot.readListBegin()", "code def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and", "not None: oprot.writeFieldBegin('email', TType.STRING, 6) oprot.writeString(self.email.encode('utf-8') if sys.version_info[0] == 2 else self.email) oprot.writeFieldEnd()", "== other) class Connection(object): \"\"\" Attributes: - name - type - config \"\"\"", "categoryId self.status = status self.name = name self.email = email self.points = points", "sys.version_info[0] == 2 else iter49) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def", "key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other):", "else kiter30) oprot.writeString(viter31.encode('utf-8') if sys.version_info[0] == 2 else viter31) oprot.writeMapEnd() oprot.writeFieldEnd() if self.uriFragments", "name=None, code=None,): self.name = name self.code = code def read(self, iprot): if iprot._fast_decode", "= parameters self.body = body def read(self, iprot): if iprot._fast_decode is not None", "Log(object): \"\"\" Attributes: - level - message \"\"\" def __init__(self, level=None, message=None,): self.level", "context=None,): self.action = action self.request = request self.context = context def read(self, iprot):", "YOU ARE DOING # # options string: py # from thrift.Thrift import TType,", "[self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Response') if self.statusCode is not None: oprot.writeFieldBegin('statusCode', TType.I32, 1) oprot.writeI32(self.statusCode)", "2 (3, TType.STRING, 'body', 'UTF8', None, ), # 3 ) all_structs.append(Event) Event.thrift_spec =", "1) oprot.writeBool(self.success) oprot.writeFieldEnd() if self.message is not None: 
oprot.writeFieldBegin('message', TType.STRING, 2) oprot.writeString(self.message.encode('utf-8') if", "if self.action is not None: oprot.writeFieldBegin('action', TType.STRING, 1) oprot.writeString(self.action.encode('utf-8') if sys.version_info[0] == 2", "5) oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] == 2 else self.body) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self):", "if ftype == TType.LIST: self.events = [] (_etype53, _size50) = iprot.readListBegin() for _i54", "if sys.version_info[0] == 2 else iprot.readString() self.headers[_key14] = _val15 iprot.readMapEnd() else: iprot.skip(ftype) elif", "response=None, events=None, logs=None,): self.response = response self.events = events self.logs = logs def", "2 (3, TType.STRUCT, 'app', [App, None], None, ), # 3 (4, TType.STRUCT, 'user',", "Attributes: - success - message \"\"\" def __init__(self, success=None, message=None,): self.success = success", "( None, # 0 (1, TType.STRUCT, 'http', [HttpRequest, None], None, ), # 1", "self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 5) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else", "oprot.writeFieldEnd() if self.url is not None: oprot.writeFieldBegin('url', TType.STRING, 5) oprot.writeString(self.url.encode('utf-8') if sys.version_info[0] ==", "self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Result') if self.response is not", "oprot.writeFieldBegin('level', TType.STRING, 1) oprot.writeString(self.level.encode('utf-8') if sys.version_info[0] == 2 else self.level) oprot.writeFieldEnd() if self.message", "if oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))", "if ftype == TType.MAP: self.uriFragments = {} (_ktype17, _vtype18, _size16) = iprot.readMapBegin() for", 
"oprot.writeFieldEnd() if self.baseUrl is not None: oprot.writeFieldBegin('baseUrl', TType.STRING, 2) oprot.writeString(self.baseUrl.encode('utf-8') if sys.version_info[0] ==", "None, # 0 (1, TType.BOOL, 'success', None, None, ), # 1 (2, TType.STRING,", "if self.id is not None: oprot.writeFieldBegin('id', TType.I64, 1) oprot.writeI64(self.id) oprot.writeFieldEnd() if self.roleId is", "oprot.writeFieldEnd() if self.roleId is not None: oprot.writeFieldBegin('roleId', TType.I64, 2) oprot.writeI64(self.roleId) oprot.writeFieldEnd() if self.categoryId", "if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 2: if", "oprot.writeFieldEnd() if self.categoryId is not None: oprot.writeFieldBegin('categoryId', TType.I64, 3) oprot.writeI64(self.categoryId) oprot.writeFieldEnd() if self.status", "TType.STRING, 'name', 'UTF8', None, ), # 4 (5, TType.STRING, 'url', 'UTF8', None, ),", "[self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Result') if self.response is not None: oprot.writeFieldBegin('response', TType.STRUCT, 1) self.response.write(oprot)", "= user def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport)", "is not None: oprot.writeFieldBegin('user', TType.STRUCT, 4) self.user.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return", "not None: oprot.writeFieldBegin('app', TType.STRUCT, 3) self.app.write(oprot) oprot.writeFieldEnd() if self.user is not None: oprot.writeFieldBegin('user',", "# 0 (1, TType.STRING, 'name', 'UTF8', None, ), # 1 (2, TType.STRING, 'code',", "is not None: oprot.writeFieldBegin('statusCode', TType.I32, 1) oprot.writeI32(self.statusCode) oprot.writeFieldEnd() if self.headers is not None:", "), # 1 (2, TType.STRING, 'message', 'UTF8', None, ), # 2 ) fix_spec(all_structs)", "== other) class Context(object): \"\"\" Attributes: - routeId - baseUrl - app -", "Attributes: - routeId - baseUrl 
- app - user \"\"\" def __init__(self, routeId=None,", "== TType.I64: self.categoryId = iprot.readI64() else: iprot.skip(ftype) elif fid == 4: if ftype", "== 2: if ftype == TType.LIST: self.events = [] (_etype53, _size50) = iprot.readListBegin()", "oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Result') if self.response is not None: oprot.writeFieldBegin('response', TType.STRUCT, 1)", "if self.method is not None: oprot.writeFieldBegin('method', TType.STRING, 1) oprot.writeString(self.method.encode('utf-8') if sys.version_info[0] == 2", "self.headers is not None: oprot.writeFieldBegin('headers', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers)) for kiter71, viter72", "oprot.writeString(kiter30.encode('utf-8') if sys.version_info[0] == 2 else kiter30) oprot.writeString(viter31.encode('utf-8') if sys.version_info[0] == 2 else", "1 (2, TType.STRING, 'code', 'UTF8', None, ), # 2 ) all_structs.append(Execute) Execute.thrift_spec =", "status - name - url - appKey - scopes - parameters \"\"\" def", "None, # 0 (1, TType.STRING, 'arguments', 'UTF8', None, ), # 1 ) all_structs.append(Context)", "routeId=None, baseUrl=None, app=None, user=None,): self.routeId = routeId self.baseUrl = baseUrl self.app = app", "= points def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport)", "None], None, ), # 2 ) all_structs.append(HttpRequest) HttpRequest.thrift_spec = ( None, # 0", "= RpcRequest() self.rpc.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if", "return oprot.writeStructBegin('Message') if self.success is not None: oprot.writeFieldBegin('success', TType.BOOL, 1) oprot.writeBool(self.success) oprot.writeFieldEnd() if", "if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 3: if", "sys.version_info[0] == 2 else 
iter48) oprot.writeListEnd() oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters',", "elif fid == 2: if ftype == TType.STRUCT: self.rpc = RpcRequest() self.rpc.read(iprot) else:", "self.body) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' %", "None, # 0 (1, TType.STRING, 'level', 'UTF8', None, ), # 1 (2, TType.STRING,", "self.level = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif", "def __ne__(self, other): return not (self == other) class Request(object): \"\"\" Attributes: -", "1 (2, TType.LIST, 'events', (TType.STRUCT, [Event, None], False), None, ), # 2 (3,", "else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: self.body = iprot.readString().decode('utf-8',", "== 2 else iprot.readString() self.parameters.append(_elem47) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def", "is not None: oprot.writeFieldBegin('roleId', TType.I64, 2) oprot.writeI64(self.roleId) oprot.writeFieldEnd() if self.categoryId is not None:", "_val15 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.headers[_key14] = _val15", "fid == 1: if ftype == TType.BOOL: self.success = iprot.readBool() else: iprot.skip(ftype) elif", "iprot.skip(ftype) elif fid == 3: if ftype == TType.STRUCT: self.app = App() self.app.read(iprot)", "return oprot.writeStructBegin('Result') if self.response is not None: oprot.writeFieldBegin('response', TType.STRUCT, 1) self.response.write(oprot) oprot.writeFieldEnd() if", "= {} (_ktype1, _vtype2, _size0) = iprot.readMapBegin() for _i4 in range(_size0): _key5 =", "== TType.STRING: self.code = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else:", "is not None: 
oprot.writeFieldBegin('rpc', TType.STRUCT, 2) self.rpc.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return", "== TType.STRING: self.level = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else:", "== other.__dict__ def __ne__(self, other): return not (self == other) class HttpRequest(object): \"\"\"", "2 else self.appKey) oprot.writeFieldEnd() if self.scopes is not None: oprot.writeFieldBegin('scopes', TType.LIST, 7) oprot.writeListBegin(TType.STRING,", "if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "[] (_etype45, _size42) = iprot.readListBegin() for _i46 in range(_size42): _elem47 = iprot.readString().decode('utf-8', errors='replace')", "sys.version_info[0] == 2 else viter35) oprot.writeMapEnd() oprot.writeFieldEnd() if self.body is not None: oprot.writeFieldBegin('body',", "ftype == TType.STRING: self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "5) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.email is not", "TType.LIST: self.scopes = [] (_etype39, _size36) = iprot.readListBegin() for _i40 in range(_size36): _elem41", "if ftype == TType.STRING: self.type = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "self.headers[_key14] = _val15 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 3: if ftype ==", "elif fid == 3: if ftype == TType.STRUCT: self.app = App() self.app.read(iprot) else:", "self.thrift_spec])) return oprot.writeStructBegin('Request') if self.http is not None: oprot.writeFieldBegin('http', TType.STRUCT, 1) self.http.write(oprot) oprot.writeFieldEnd()", "4 ) all_structs.append(App) App.thrift_spec = ( None, # 0 (1, TType.I64, 'id', None,", "value) for key, value in 
self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def", "- config \"\"\" def __init__(self, name=None, type=None, config=None,): self.name = name self.type =", "logs \"\"\" def __init__(self, response=None, events=None, logs=None,): self.response = response self.events = events", "if ftype == TType.STRUCT: self.app = App() self.app.read(iprot) else: iprot.skip(ftype) elif fid ==", "== 2 else kiter71) oprot.writeString(viter72.encode('utf-8') if sys.version_info[0] == 2 else viter72) oprot.writeMapEnd() oprot.writeFieldEnd()", "'UTF8', None, ), # 1 (2, TType.STRUCT, 'request', [Request, None], None, ), #", "self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class Connection(object):", "self.rpc.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is", "(self == other) class User(object): \"\"\" Attributes: - id - roleId - categoryId", "if self.body is not None: oprot.writeFieldBegin('body', TType.STRING, 5) oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] == 2", "\"\"\" Attributes: - eventName - data \"\"\" def __init__(self, eventName=None, data=None,): self.eventName =", "is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('App') if self.id is not None:", "None, # 0 (1, TType.I64, 'routeId', None, None, ), # 1 (2, TType.STRING,", "(self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__", "== TType.I64: self.userId = iprot.readI64() else: iprot.skip(ftype) elif fid == 3: if ftype", "TType.I64, 1) oprot.writeI64(self.id) oprot.writeFieldEnd() if self.roleId is not None: oprot.writeFieldBegin('roleId', TType.I64, 2) oprot.writeI64(self.roleId)", "= iprot.readI32() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() 
iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode", "TType.STRING, 'type', 'UTF8', None, ), # 2 (3, TType.MAP, 'config', (TType.STRING, 'UTF8', TType.STRING,", "self.message = message def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans,", "if sys.version_info[0] == 2 else iprot.readString() self.parameters.append(_elem47) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd()", "None, ), # 1 (2, TType.STRING, 'baseUrl', 'UTF8', None, ), # 2 (3,", "__ne__(self, other): return not (self == other) class Connection(object): \"\"\" Attributes: - name", "return oprot.writeStructBegin('Log') if self.level is not None: oprot.writeFieldBegin('level', TType.STRING, 1) oprot.writeString(self.level.encode('utf-8') if sys.version_info[0]", ") all_structs.append(Request) Request.thrift_spec = ( None, # 0 (1, TType.STRUCT, 'http', [HttpRequest, None],", "is not None: oprot.writeFieldBegin('headers', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers)) for kiter71, viter72 in", "= HttpRequest() self.http.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT:", "= [] (_etype45, _size42) = iprot.readListBegin() for _i46 in range(_size42): _elem47 = iprot.readString().decode('utf-8',", "2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers)) for kiter30, viter31 in self.headers.items(): oprot.writeString(kiter30.encode('utf-8') if sys.version_info[0] ==", "self.http.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.rpc =", "self.email = email self.points = points def read(self, iprot): if iprot._fast_decode is not", "oprot.writeFieldEnd() if self.type is not None: oprot.writeFieldBegin('type', TType.STRING, 2) oprot.writeString(self.type.encode('utf-8') if sys.version_info[0] ==", "= events self.logs = logs def read(self, iprot): if iprot._fast_decode is not None", "Attributes: - arguments \"\"\" def 
__init__(self, arguments=None,): self.arguments = arguments def read(self, iprot):", "self.id = id self.roleId = roleId self.categoryId = categoryId self.status = status self.name", "len(self.headers)) for kiter71, viter72 in self.headers.items(): oprot.writeString(kiter71.encode('utf-8') if sys.version_info[0] == 2 else kiter71)", "), # 7 ) all_structs.append(Result) Result.thrift_spec = ( None, # 0 (1, TType.STRUCT,", "else iprot.readString() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.STRING: self.appKey", "ftype == TType.STRING: self.appKey = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "self.thrift_spec])) return oprot.writeStructBegin('Execute') if self.action is not None: oprot.writeFieldBegin('action', TType.STRING, 1) oprot.writeString(self.action.encode('utf-8') if", "# 1 (2, TType.STRING, 'baseUrl', 'UTF8', None, ), # 2 (3, TType.STRUCT, 'app',", "break if fid == 1: if ftype == TType.I32: self.statusCode = iprot.readI32() else:", "= data def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport)", "None], None, ), # 3 (4, TType.STRUCT, 'user', [User, None], None, ), #", "TType.STOP: break if fid == 1: if ftype == TType.STRING: self.method = iprot.readString().decode('utf-8',", "self.response = Response() self.response.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype ==", "elif fid == 2: if ftype == TType.STRING: self.data = iprot.readString().decode('utf-8', errors='replace') if", "# 0 (1, TType.I32, 'statusCode', None, None, ), # 1 (2, TType.MAP, 'headers',", "sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self,", "== 2 else iprot.readString() _val15 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "if self.statusCode is not None: oprot.writeFieldBegin('statusCode', 
TType.I32, 1) oprot.writeI32(self.statusCode) oprot.writeFieldEnd() if self.headers is", "== 5: if ftype == TType.STRING: self.url = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "uriFragments - parameters - body \"\"\" def __init__(self, method=None, headers=None, uriFragments=None, parameters=None, body=None,):", "None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Execute') if self.action is not None: oprot.writeFieldBegin('action', TType.STRING,", "fid == 1: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "== TType.STOP: break if fid == 1: if ftype == TType.I64: self.routeId =", "2 else self.name) oprot.writeFieldEnd() if self.url is not None: oprot.writeFieldBegin('url', TType.STRING, 5) oprot.writeString(self.url.encode('utf-8')", "message def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and", "App() self.app.read(iprot) else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRUCT: self.user", "Context.thrift_spec = ( None, # 0 (1, TType.I64, 'routeId', None, None, ), #", "ftype == TType.STRING: self.code = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "iprot.readMapBegin() for _i27 in range(_size23): _key28 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "oprot.writeString(self.arguments.encode('utf-8') if sys.version_info[0] == 2 else self.arguments) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return", "), # 3 (4, TType.I32, 'status', None, None, ), # 4 (5, TType.STRING,", "return not (self == other) class RpcRequest(object): \"\"\" Attributes: - arguments \"\"\" def", "= success self.message = message def read(self, iprot): if iprot._fast_decode is not None", "= 
iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.parameters[_key28] = _val29 iprot.readMapEnd()", "if sys.version_info[0] == 2 else self.body) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def", "else iprot.readString() self.parameters[_key28] = _val29 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 5: if", "'UTF8', None, ), # 6 (7, TType.LIST, 'scopes', (TType.STRING, 'UTF8', False), None, ),", "(3, TType.STRING, 'body', 'UTF8', None, ), # 3 ) all_structs.append(Event) Event.thrift_spec = (", "1: if ftype == TType.STRUCT: self.response = Response() self.response.read(iprot) else: iprot.skip(ftype) elif fid", "== 2 else self.body) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L", "fid == 2: if ftype == TType.MAP: self.headers = {} (_ktype10, _vtype11, _size9)", "= [] class Message(object): \"\"\" Attributes: - success - message \"\"\" def __init__(self,", "all_structs.append(Request) Request.thrift_spec = ( None, # 0 (1, TType.STRUCT, 'http', [HttpRequest, None], None,", "'.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self,", "Message.thrift_spec = ( None, # 0 (1, TType.BOOL, 'success', None, None, ), #", "== TType.STRING: self.name = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else:", "- appKey - scopes - parameters \"\"\" def __init__(self, id=None, userId=None, status=None, name=None,", "'response', [Response, None], None, ), # 1 (2, TType.LIST, 'events', (TType.STRUCT, [Event, None],", "None: oprot.writeFieldBegin('eventName', TType.STRING, 1) oprot.writeString(self.eventName.encode('utf-8') if sys.version_info[0] == 2 else self.eventName) oprot.writeFieldEnd() if", "2: if ftype == TType.STRING: self.data = iprot.readString().decode('utf-8', 
errors='replace') if sys.version_info[0] == 2", "== 1: if ftype == TType.STRING: self.eventName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers)) for kiter71, viter72 in self.headers.items(): oprot.writeString(kiter71.encode('utf-8') if sys.version_info[0] ==", "None: oprot.writeFieldBegin('code', TType.STRING, 2) oprot.writeString(self.code.encode('utf-8') if sys.version_info[0] == 2 else self.code) oprot.writeFieldEnd() oprot.writeFieldStop()", "self.events is not None: oprot.writeFieldBegin('events', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.events)) for iter62 in self.events:", "None: oprot.writeFieldBegin('parameters', TType.MAP, 4) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) for kiter34, viter35 in self.parameters.items(): oprot.writeString(kiter34.encode('utf-8')", "4) self.user.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r'", "in self.headers.items(): oprot.writeString(kiter71.encode('utf-8') if sys.version_info[0] == 2 else kiter71) oprot.writeString(viter72.encode('utf-8') if sys.version_info[0] ==", "self.routeId is not None: oprot.writeFieldBegin('routeId', TType.I64, 1) oprot.writeI64(self.routeId) oprot.writeFieldEnd() if self.baseUrl is not", "TType.STRUCT, 'http', [HttpRequest, None], None, ), # 1 (2, TType.STRUCT, 'rpc', [RpcRequest, None],", "else iprot.readString() _val70 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.headers[_key69]", "- message \"\"\" def __init__(self, success=None, message=None,): self.success = success self.message = message", "TType.STRING, 1) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.code is", "elif fid == 3: if ftype == TType.LIST: self.logs = [] 
(_etype59, _size56)", "'%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__", "TType.MAP, 'headers', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 2 (3, TType.MAP,", "__ne__(self, other): return not (self == other) class Action(object): \"\"\" Attributes: - name", "= request self.context = context def read(self, iprot): if iprot._fast_decode is not None", "if sys.version_info[0] == 2 else kiter71) oprot.writeString(viter72.encode('utf-8') if sys.version_info[0] == 2 else viter72)", "other) class RpcRequest(object): \"\"\" Attributes: - arguments \"\"\" def __init__(self, arguments=None,): self.arguments =", "(self == other) class Execute(object): \"\"\" Attributes: - action - request - context", "self.eventName is not None: oprot.writeFieldBegin('eventName', TType.STRING, 1) oprot.writeString(self.eventName.encode('utf-8') if sys.version_info[0] == 2 else", "self.id is not None: oprot.writeFieldBegin('id', TType.I64, 1) oprot.writeI64(self.id) oprot.writeFieldEnd() if self.roleId is not", "== TType.STRING: self.type = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else:", "iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.parameters.append(_elem47) iprot.readListEnd() else: iprot.skip(ftype) else:", "self.user = user def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans,", "), # 1 ) all_structs.append(Context) Context.thrift_spec = ( None, # 0 (1, TType.I64,", "if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.email is not None: oprot.writeFieldBegin('email',", "TType.STRING, 'name', 'UTF8', None, ), # 1 (2, TType.STRING, 'code', 'UTF8', None, ),", "None, ), # 4 (5, TType.STRING, 'url', 'UTF8', None, ), # 5 (6,", "not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, 
[self.__class__, self.thrift_spec])) return oprot.writeStructBegin('User') if", "[self.__class__, self.thrift_spec])) return oprot.writeStructBegin('RpcRequest') if self.arguments is not None: oprot.writeFieldBegin('arguments', TType.STRING, 1) oprot.writeString(self.arguments.encode('utf-8')", "self.user.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' %", "None, # 0 (1, TType.STRING, 'method', 'UTF8', None, ), # 1 (2, TType.MAP,", "iprot.readString() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is", "TType.MAP, 4) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) for kiter34, viter35 in self.parameters.items(): oprot.writeString(kiter34.encode('utf-8') if sys.version_info[0]", "8: if ftype == TType.LIST: self.parameters = [] (_etype45, _size42) = iprot.readListBegin() for", "TType.MAP: self.headers = {} (_ktype65, _vtype66, _size64) = iprot.readMapBegin() for _i68 in range(_size64):", "else iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.code", "TType.I64, 'userId', None, None, ), # 2 (3, TType.I32, 'status', None, None, ),", "'id', None, None, ), # 1 (2, TType.I64, 'roleId', None, None, ), #", "self.userId is not None: oprot.writeFieldBegin('userId', TType.I64, 2) oprot.writeI64(self.userId) oprot.writeFieldEnd() if self.status is not", "_size56) = iprot.readListBegin() for _i60 in range(_size56): _elem61 = Log() _elem61.read(iprot) self.logs.append(_elem61) iprot.readListEnd()", "else viter72) oprot.writeMapEnd() oprot.writeFieldEnd() if self.body is not None: oprot.writeFieldBegin('body', TType.STRING, 3) oprot.writeString(self.body.encode('utf-8')", "== TType.MAP: self.config = {} (_ktype1, _vtype2, _size0) = iprot.readMapBegin() for _i4 in", "# 1 (2, TType.I64, 'roleId', None, None, ), # 2 (3, TType.I64, 
'categoryId',", "== 2 else viter31) oprot.writeMapEnd() oprot.writeFieldEnd() if self.uriFragments is not None: oprot.writeFieldBegin('uriFragments', TType.MAP,", "(self == other) class Event(object): \"\"\" Attributes: - eventName - data \"\"\" def", "[] class Message(object): \"\"\" Attributes: - success - message \"\"\" def __init__(self, success=None,", "oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.code is not None:", "4) oprot.writeI32(self.status) oprot.writeFieldEnd() if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 5) oprot.writeString(self.name.encode('utf-8') if", "oprot.writeListEnd() oprot.writeFieldEnd() if self.logs is not None: oprot.writeFieldBegin('logs', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.logs)) for", "User.thrift_spec = ( None, # 0 (1, TType.I64, 'id', None, None, ), #", "oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Response') if self.statusCode is not None: oprot.writeFieldBegin('statusCode', TType.I32, 1)", "0 (1, TType.STRUCT, 'response', [Response, None], None, ), # 1 (2, TType.LIST, 'events',", "None, ), # 4 (5, TType.STRING, 'name', 'UTF8', None, ), # 5 (6,", "2 else iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.MAP:", "0 (1, TType.STRUCT, 'http', [HttpRequest, None], None, ), # 1 (2, TType.STRUCT, 'rpc',", "TType.STOP: break if fid == 1: if ftype == TType.I32: self.statusCode = iprot.readI32()", "self.user is not None: oprot.writeFieldBegin('user', TType.STRUCT, 4) self.user.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self):", "ftype == TType.STRING: self.method = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "'UTF8', None, ), # 2 ) all_structs.append(Log) Log.thrift_spec = ( None, # 0", "(self == other) class 
Result(object): \"\"\" Attributes: - response - events - logs", "other): return not (self == other) class Log(object): \"\"\" Attributes: - level -", "iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.logs =", "if fid == 1: if ftype == TType.I64: self.id = iprot.readI64() else: iprot.skip(ftype)", "iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif fid ==", "self.uriFragments[_key21] = _val22 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 4: if ftype ==", "if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 4) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2", "3) self.context.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r'", "8) oprot.writeListBegin(TType.STRING, len(self.parameters)) for iter49 in self.parameters: oprot.writeString(iter49.encode('utf-8') if sys.version_info[0] == 2 else", "TType.STRING, 'method', 'UTF8', None, ), # 1 (2, TType.MAP, 'headers', (TType.STRING, 'UTF8', TType.STRING,", "class Context(object): \"\"\" Attributes: - routeId - baseUrl - app - user \"\"\"", "oprot.writeFieldBegin('baseUrl', TType.STRING, 2) oprot.writeString(self.baseUrl.encode('utf-8') if sys.version_info[0] == 2 else self.baseUrl) oprot.writeFieldEnd() if self.app", "= {} (_ktype24, _vtype25, _size23) = iprot.readMapBegin() for _i27 in range(_size23): _key28 =", "HttpRequest(object): \"\"\" Attributes: - method - headers - uriFragments - parameters - body", "self.arguments is not None: oprot.writeFieldBegin('arguments', TType.STRING, 1) oprot.writeString(self.arguments.encode('utf-8') if sys.version_info[0] == 2 else", "config \"\"\" def __init__(self, name=None, type=None, config=None,): self.name = name self.type = type", "oprot.writeString(self.baseUrl.encode('utf-8') if sys.version_info[0] == 2 else 
self.baseUrl) oprot.writeFieldEnd() if self.app is not None:", "- logs \"\"\" def __init__(self, response=None, events=None, logs=None,): self.response = response self.events =", "), # 6 (7, TType.I32, 'points', None, None, ), # 7 ) all_structs.append(Result)", "fid == 4: if ftype == TType.I32: self.status = iprot.readI32() else: iprot.skip(ftype) elif", "oprot.writeString(self.appKey.encode('utf-8') if sys.version_info[0] == 2 else self.appKey) oprot.writeFieldEnd() if self.scopes is not None:", "None: oprot.writeFieldBegin('response', TType.STRUCT, 1) self.response.write(oprot) oprot.writeFieldEnd() if self.events is not None: oprot.writeFieldBegin('events', TType.LIST,", "2 else self.name) oprot.writeFieldEnd() if self.code is not None: oprot.writeFieldBegin('code', TType.STRING, 2) oprot.writeString(self.code.encode('utf-8')", "TType.MAP: self.parameters = {} (_ktype24, _vtype25, _size23) = iprot.readMapBegin() for _i27 in range(_size23):", "ftype == TType.STRUCT: self.http = HttpRequest() self.http.read(iprot) else: iprot.skip(ftype) elif fid == 2:", "url self.appKey = appKey self.scopes = scopes self.parameters = parameters def read(self, iprot):", "TType.STRING, 'name', 'UTF8', None, ), # 5 (6, TType.STRING, 'email', 'UTF8', None, ),", "other): return not (self == other) class Action(object): \"\"\" Attributes: - name -", "fid == 2: if ftype == TType.MAP: self.headers = {} (_ktype65, _vtype66, _size64)", "if sys.version_info[0] == 2 else self.data) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def", "= iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.config[_key5] = _val6 iprot.readMapEnd()", "= iprot.readBool() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.message", "import sys from thrift.transport import TTransport all_structs = [] class Message(object): \"\"\" Attributes:", "TType.STRING: self.code = 
iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype)", "iprot.readString() _val15 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.headers[_key14] =", "sys.version_info[0] == 2 else self.action) oprot.writeFieldEnd() if self.request is not None: oprot.writeFieldBegin('request', TType.STRUCT,", "(_ktype17, _vtype18, _size16) = iprot.readMapBegin() for _i20 in range(_size16): _key21 = iprot.readString().decode('utf-8', errors='replace')", "\"\"\" Attributes: - http - rpc \"\"\" def __init__(self, http=None, rpc=None,): self.http =", "and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Event') if self.eventName is", "iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is", "2 (3, TType.I32, 'status', None, None, ), # 3 (4, TType.STRING, 'name', 'UTF8',", "'UTF8', None, ), # 2 ) all_structs.append(Connection) Connection.thrift_spec = ( None, # 0", "None, None, ), # 1 (2, TType.I64, 'userId', None, None, ), # 2", "self.uriFragments = {} (_ktype17, _vtype18, _size16) = iprot.readMapBegin() for _i20 in range(_size16): _key21", "None: oprot.writeFieldBegin('scopes', TType.LIST, 7) oprot.writeListBegin(TType.STRING, len(self.scopes)) for iter48 in self.scopes: oprot.writeString(iter48.encode('utf-8') if sys.version_info[0]", "_key28 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val29 = iprot.readString().decode('utf-8',", "3: if ftype == TType.I32: self.status = iprot.readI32() else: iprot.skip(ftype) elif fid ==", "TType.STRING, 'appKey', 'UTF8', None, ), # 6 (7, TType.LIST, 'scopes', (TType.STRING, 'UTF8', False),", "id=None, userId=None, status=None, name=None, url=None, 
appKey=None, scopes=None, parameters=None,): self.id = id self.userId =", "__init__(self, level=None, message=None,): self.level = level self.message = message def read(self, iprot): if", "(3, TType.LIST, 'logs', (TType.STRUCT, [Log, None], False), None, ), # 3 ) all_structs.append(Response)", "if ftype == TType.STRING: self.baseUrl = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "is not None: oprot.writeFieldBegin('events', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.events)) for iter62 in self.events: iter62.write(oprot)", "else iprot.readString() self.uriFragments[_key21] = _val22 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 4: if", "return oprot.writeStructBegin('Context') if self.routeId is not None: oprot.writeFieldBegin('routeId', TType.I64, 1) oprot.writeI64(self.routeId) oprot.writeFieldEnd() if", "iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val15 = iprot.readString().decode('utf-8', errors='replace') if", "1: if ftype == TType.STRING: self.eventName = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "ftype == TType.STRUCT: self.request = Request() self.request.read(iprot) else: iprot.skip(ftype) elif fid == 3:", "else iprot.readString() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: self.url", "self.name = name self.email = email self.points = points def read(self, iprot): if", "None, None, ), # 3 (4, TType.I32, 'status', None, None, ), # 4", "== 2 else iprot.readString() self.scopes.append(_elem41) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 8: if", "if ftype == TType.STRING: self.body = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "'parameters', (TType.STRING, 'UTF8', False), None, ), # 8 ) all_structs.append(User) User.thrift_spec = (", "break if fid == 1: if ftype == TType.BOOL: self.success = iprot.readBool() else:", 
"not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Message') if self.success is not None: oprot.writeFieldBegin('success',", "elif fid == 6: if ftype == TType.STRING: self.appKey = iprot.readString().decode('utf-8', errors='replace') if", "if fid == 1: if ftype == TType.BOOL: self.success = iprot.readBool() else: iprot.skip(ftype)", "other) all_structs.append(Message) Message.thrift_spec = ( None, # 0 (1, TType.BOOL, 'success', None, None,", "if ftype == TType.MAP: self.headers = {} (_ktype65, _vtype66, _size64) = iprot.readMapBegin() for", "oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Event') if self.eventName is not None: oprot.writeFieldBegin('eventName', TType.STRING, 1)", "config=None,): self.name = name self.type = type self.config = config def read(self, iprot):", "if self.status is not None: oprot.writeFieldBegin('status', TType.I32, 4) oprot.writeI32(self.status) oprot.writeFieldEnd() if self.name is", "by Thrift Compiler (0.14.2) # # DO NOT EDIT UNLESS YOU ARE SURE", "else iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.MAP: self.config", "if self.routeId is not None: oprot.writeFieldBegin('routeId', TType.I64, 1) oprot.writeI64(self.routeId) oprot.writeFieldEnd() if self.baseUrl is", "type=None, config=None,): self.name = name self.type = type self.config = config def read(self,", ") all_structs.append(Connection) Connection.thrift_spec = ( None, # 0 (1, TType.STRING, 'name', 'UTF8', None,", "{} (_ktype17, _vtype18, _size16) = iprot.readMapBegin() for _i20 in range(_size16): _key21 = iprot.readString().decode('utf-8',", "ftype == TType.I64: self.id = iprot.readI64() else: iprot.skip(ftype) elif fid == 2: if", "), # 2 (3, TType.STRUCT, 'app', [App, None], None, ), # 3 (4,", "self.thrift_spec])) return oprot.writeStructBegin('Response') if self.statusCode is not None: 
oprot.writeFieldBegin('statusCode', TType.I32, 1) oprot.writeI32(self.statusCode) oprot.writeFieldEnd()", "oprot.writeFieldBegin('headers', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers)) for kiter71, viter72 in self.headers.items(): oprot.writeString(kiter71.encode('utf-8') if", "not None: oprot.writeFieldBegin('categoryId', TType.I64, 3) oprot.writeI64(self.categoryId) oprot.writeFieldEnd() if self.status is not None: oprot.writeFieldBegin('status',", "app self.user = user def read(self, iprot): if iprot._fast_decode is not None and", "len(self.scopes)) for iter48 in self.scopes: oprot.writeString(iter48.encode('utf-8') if sys.version_info[0] == 2 else iter48) oprot.writeListEnd()", "RpcRequest(object): \"\"\" Attributes: - arguments \"\"\" def __init__(self, arguments=None,): self.arguments = arguments def", "self.parameters = {} (_ktype24, _vtype25, _size23) = iprot.readMapBegin() for _i27 in range(_size23): _key28", "if ftype == TType.I64: self.roleId = iprot.readI64() else: iprot.skip(ftype) elif fid == 3:", "3) oprot.writeListBegin(TType.STRUCT, len(self.logs)) for iter63 in self.logs: iter63.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def", "else self.level) oprot.writeFieldEnd() if self.message is not None: oprot.writeFieldBegin('message', TType.STRING, 2) oprot.writeString(self.message.encode('utf-8') if", "'level', 'UTF8', None, ), # 1 (2, TType.STRING, 'message', 'UTF8', None, ), #", "- request - context \"\"\" def __init__(self, action=None, request=None, context=None,): self.action = action", "self.roleId = iprot.readI64() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.I64:", "(0.14.2) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW", "== 2 else kiter34) oprot.writeString(viter35.encode('utf-8') if sys.version_info[0] == 2 else viter35) oprot.writeMapEnd() oprot.writeFieldEnd()", "fid == 2: if ftype == TType.STRING: self.data = 
iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "== TType.STRING: self.message = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else:", "self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other)", "fid == 3: if ftype == TType.I32: self.status = iprot.readI32() else: iprot.skip(ftype) elif", "ftype == TType.LIST: self.logs = [] (_etype59, _size56) = iprot.readListBegin() for _i60 in", "None, ), # 2 (3, TType.STRUCT, 'app', [App, None], None, ), # 3", "[Event, None], False), None, ), # 2 (3, TType.LIST, 'logs', (TType.STRUCT, [Log, None],", "self.baseUrl is not None: oprot.writeFieldBegin('baseUrl', TType.STRING, 2) oprot.writeString(self.baseUrl.encode('utf-8') if sys.version_info[0] == 2 else", "elif fid == 2: if ftype == TType.STRING: self.code = iprot.readString().decode('utf-8', errors='replace') if", "fid == 2: if ftype == TType.STRING: self.type = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "if sys.version_info[0] == 2 else iprot.readString() self.config[_key5] = _val6 iprot.readMapEnd() else: iprot.skip(ftype) else:", "elif fid == 4: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8', errors='replace') if", "def __init__(self, name=None, code=None,): self.name = name self.code = code def read(self, iprot):", "oprot.writeFieldBegin('id', TType.I64, 1) oprot.writeI64(self.id) oprot.writeFieldEnd() if self.userId is not None: oprot.writeFieldBegin('userId', TType.I64, 2)", "'code', 'UTF8', None, ), # 2 ) all_structs.append(Execute) Execute.thrift_spec = ( None, #", "self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class RpcRequest(object):", "else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.body = iprot.readString().decode('utf-8',", "oprot.writeMapEnd() oprot.writeFieldEnd() if self.body is not None: 
oprot.writeFieldBegin('body', TType.STRING, 3) oprot.writeString(self.body.encode('utf-8') if sys.version_info[0]", "for kiter7, viter8 in self.config.items(): oprot.writeString(kiter7.encode('utf-8') if sys.version_info[0] == 2 else kiter7) oprot.writeString(viter8.encode('utf-8')", "not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('User') if self.id is not None: oprot.writeFieldBegin('id',", "1) oprot.writeString(self.eventName.encode('utf-8') if sys.version_info[0] == 2 else self.eventName) oprot.writeFieldEnd() if self.data is not", "self.success = iprot.readBool() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING:", "app - user \"\"\" def __init__(self, routeId=None, baseUrl=None, app=None, user=None,): self.routeId = routeId", "(2, TType.STRING, 'code', 'UTF8', None, ), # 2 ) all_structs.append(Execute) Execute.thrift_spec = (", "else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.rpc = RpcRequest()", "break if fid == 1: if ftype == TType.STRUCT: self.response = Response() self.response.read(iprot)", "# 0 (1, TType.STRING, 'level', 'UTF8', None, ), # 1 (2, TType.STRING, 'message',", "iprot.readMapBegin() for _i4 in range(_size0): _key5 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "range(_size56): _elem61 = Log() _elem61.read(iprot) self.logs.append(_elem61) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd()", "), # 3 ) all_structs.append(Action) Action.thrift_spec = ( None, # 0 (1, TType.STRING,", "2: if ftype == TType.STRUCT: self.request = Request() self.request.read(iprot) else: iprot.skip(ftype) elif fid", "0 (1, TType.STRING, 'name', 'UTF8', None, ), # 1 (2, TType.STRING, 'code', 'UTF8',", "elif fid == 6: if ftype == TType.STRING: self.email = iprot.readString().decode('utf-8', errors='replace') if", "is not None: oprot.writeFieldBegin('categoryId', 
TType.I64, 3) oprot.writeI64(self.categoryId) oprot.writeFieldEnd() if self.status is not None:", "_i46 in range(_size42): _elem47 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "), # 1 (2, TType.I64, 'roleId', None, None, ), # 2 (3, TType.I64,", "self.config = config def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans,", "== 2 else iprot.readString() _val6 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "not (self == other) class Context(object): \"\"\" Attributes: - routeId - baseUrl -", "# 7 (8, TType.LIST, 'parameters', (TType.STRING, 'UTF8', False), None, ), # 8 )", "TType.I32, 'points', None, None, ), # 7 ) all_structs.append(Result) Result.thrift_spec = ( None,", "- scopes - parameters \"\"\" def __init__(self, id=None, userId=None, status=None, name=None, url=None, appKey=None,", "errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val22 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "self.scopes is not None: oprot.writeFieldBegin('scopes', TType.LIST, 7) oprot.writeListBegin(TType.STRING, len(self.scopes)) for iter48 in self.scopes:", "(3, TType.STRUCT, 'app', [App, None], None, ), # 3 (4, TType.STRUCT, 'user', [User,", "other.__dict__ def __ne__(self, other): return not (self == other) class Result(object): \"\"\" Attributes:", "1 (2, TType.MAP, 'headers', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 2", "== TType.MAP: self.headers = {} (_ktype10, _vtype11, _size9) = iprot.readMapBegin() for _i13 in", "self.points = iprot.readI32() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if", "else: iprot.skip(ftype) elif fid == 4: if ftype == TType.I32: self.status = iprot.readI32()", "== 4: if ftype == TType.MAP: self.parameters = {} (_ktype24, _vtype25, _size23) =", "_vtype18, _size16) = 
iprot.readMapBegin() for _i20 in range(_size16): _key21 = iprot.readString().decode('utf-8', errors='replace') if", "if ftype == TType.STRUCT: self.context = Context() self.context.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd()", "self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class Execute(object):", "oprot.writeStructBegin('Request') if self.http is not None: oprot.writeFieldBegin('http', TType.STRUCT, 1) self.http.write(oprot) oprot.writeFieldEnd() if self.rpc", "else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8',", "if sys.version_info[0] == 2 else self.email) oprot.writeFieldEnd() if self.points is not None: oprot.writeFieldBegin('points',", "other.__dict__ def __ne__(self, other): return not (self == other) class RpcRequest(object): \"\"\" Attributes:", "( None, # 0 (1, TType.STRING, 'name', 'UTF8', None, ), # 1 (2,", "None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Execute') if self.action", "self.user.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot._fast_encode is", "None: oprot.writeFieldBegin('config', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.config)) for kiter7, viter8 in self.config.items(): oprot.writeString(kiter7.encode('utf-8')", "self.categoryId is not None: oprot.writeFieldBegin('categoryId', TType.I64, 3) oprot.writeI64(self.categoryId) oprot.writeFieldEnd() if self.status is not", "import fix_spec import sys from thrift.transport import TTransport all_structs = [] class Message(object):", "7) oprot.writeListBegin(TType.STRING, len(self.scopes)) for iter48 in self.scopes: oprot.writeString(iter48.encode('utf-8') if sys.version_info[0] == 2 else", "1) oprot.writeString(self.name.encode('utf-8') if 
sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.type is not", "7) oprot.writeI32(self.points) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r'", "other.__dict__ def __ne__(self, other): return not (self == other) class Response(object): \"\"\" Attributes:", "oprot.writeFieldEnd() if self.data is not None: oprot.writeFieldBegin('data', TType.STRING, 2) oprot.writeString(self.data.encode('utf-8') if sys.version_info[0] ==", "2 else self.type) oprot.writeFieldEnd() if self.config is not None: oprot.writeFieldBegin('config', TType.MAP, 3) oprot.writeMapBegin(TType.STRING,", "== 2 else iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype ==", "self.thrift_spec])) return oprot.writeStructBegin('Event') if self.eventName is not None: oprot.writeFieldBegin('eventName', TType.STRING, 1) oprot.writeString(self.eventName.encode('utf-8') if", "_key14 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val15 = iprot.readString().decode('utf-8',", "errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.headers[_key14] = _val15 iprot.readMapEnd() else: iprot.skip(ftype)", "self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 4) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else", "def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec", "oprot.writeFieldBegin('name', TType.STRING, 4) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.url", "fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if", "- user \"\"\" def __init__(self, routeId=None, baseUrl=None, app=None, user=None,): self.routeId = routeId self.baseUrl", "sys.version_info[0] == 2 else iprot.readString() self.config[_key5] 
= _val6 iprot.readMapEnd() else: iprot.skip(ftype) else: iprot.skip(ftype)", "TType.STRING, 6) oprot.writeString(self.appKey.encode('utf-8') if sys.version_info[0] == 2 else self.appKey) oprot.writeFieldEnd() if self.scopes is", "'UTF8', False), None, ), # 3 ) all_structs.append(Action) Action.thrift_spec = ( None, #", "TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.logs)) for iter63 in self.logs: iter63.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd()", "None: oprot.writeFieldBegin('body', TType.STRING, 3) oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] == 2 else self.body) oprot.writeFieldEnd() oprot.writeFieldStop()", "False), None, ), # 3 (4, TType.MAP, 'parameters', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False),", "self.body is not None: oprot.writeFieldBegin('body', TType.STRING, 5) oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] == 2 else", "parameters def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and", "'UTF8', None, ), # 1 (2, TType.STRING, 'code', 'UTF8', None, ), # 2", "and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Context') if self.routeId is", "# 5 ) all_structs.append(RpcRequest) RpcRequest.thrift_spec = ( None, # 0 (1, TType.STRING, 'arguments',", "(TType.STRING, 'UTF8', False), None, ), # 8 ) all_structs.append(User) User.thrift_spec = ( None,", "'headers', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 2 (3, TType.MAP, 'uriFragments',", "= iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) else: iprot.skip(ftype)", "iter63 in self.logs: iter63.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def 
__repr__(self):", "else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRUCT: self.user = User()", "class Execute(object): \"\"\" Attributes: - action - request - context \"\"\" def __init__(self,", "self.uriFragments = uriFragments self.parameters = parameters self.body = body def read(self, iprot): if", "is not None: oprot.writeFieldBegin('appKey', TType.STRING, 6) oprot.writeString(self.appKey.encode('utf-8') if sys.version_info[0] == 2 else self.appKey)", "if sys.version_info[0] == 2 else viter72) oprot.writeMapEnd() oprot.writeFieldEnd() if self.body is not None:", "None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Response') if self.statusCode", "not None: oprot.writeFieldBegin('success', TType.BOOL, 1) oprot.writeBool(self.success) oprot.writeFieldEnd() if self.message is not None: oprot.writeFieldBegin('message',", "self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class Log(object):", "TType.STRING, 'UTF8', False), None, ), # 2 (3, TType.MAP, 'uriFragments', (TType.STRING, 'UTF8', TType.STRING,", "self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('App') if self.id is not", "2 (3, TType.MAP, 'uriFragments', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3", "'UTF8', False), None, ), # 3 (4, TType.MAP, 'parameters', (TType.STRING, 'UTF8', TType.STRING, 'UTF8',", "== other) class Event(object): \"\"\" Attributes: - eventName - data \"\"\" def __init__(self,", "oprot.writeFieldBegin('context', TType.STRUCT, 3) self.context.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L", "http=None, rpc=None,): self.http = http self.rpc = rpc def read(self, iprot): if iprot._fast_decode", "sys.version_info[0] == 2 else iprot.readString() 
self.scopes.append(_elem41) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 8:", "== 2 else iprot.readString() self.uriFragments[_key21] = _val22 iprot.readMapEnd() else: iprot.skip(ftype) elif fid ==", "3) self.app.write(oprot) oprot.writeFieldEnd() if self.user is not None: oprot.writeFieldBegin('user', TType.STRUCT, 4) self.user.write(oprot) oprot.writeFieldEnd()", "sys from thrift.transport import TTransport all_structs = [] class Message(object): \"\"\" Attributes: -", "oprot.writeFieldBegin('data', TType.STRING, 2) oprot.writeString(self.data.encode('utf-8') if sys.version_info[0] == 2 else self.data) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd()", "not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__,", "= _val15 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.MAP:", "\"\"\" Attributes: - routeId - baseUrl - app - user \"\"\" def __init__(self,", "# 3 (4, TType.I32, 'status', None, None, ), # 4 (5, TType.STRING, 'name',", "iprot.readI64() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.I64: self.userId =", "is not None: oprot.writeFieldBegin('success', TType.BOOL, 1) oprot.writeBool(self.success) oprot.writeFieldEnd() if self.message is not None:", "viter31 in self.headers.items(): oprot.writeString(kiter30.encode('utf-8') if sys.version_info[0] == 2 else kiter30) oprot.writeString(viter31.encode('utf-8') if sys.version_info[0]", "fid == 3: if ftype == TType.MAP: self.config = {} (_ktype1, _vtype2, _size0)", "level=None, message=None,): self.level = level self.message = message def read(self, iprot): if iprot._fast_decode", "oprot.writeFieldBegin('action', TType.STRING, 1) oprot.writeString(self.action.encode('utf-8') if sys.version_info[0] == 2 else self.action) oprot.writeFieldEnd() if self.request", "iprot.readString() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.STRING: self.appKey 
=", "iprot.readMapBegin() for _i20 in range(_size16): _key21 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "headers=None, body=None,): self.statusCode = statusCode self.headers = headers self.body = body def read(self,", "oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Execute') if self.action is not None: oprot.writeFieldBegin('action', TType.STRING, 1)", "= name self.url = url self.appKey = appKey self.scopes = scopes self.parameters =", "None, ), # 2 (3, TType.MAP, 'uriFragments', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None,", "= _val70 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING:", "self.thrift_spec])) return oprot.writeStructBegin('Action') if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8') if", "iprot.readMapBegin() for _i13 in range(_size9): _key14 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "None, ), # 1 (2, TType.STRING, 'message', 'UTF8', None, ), # 2 )", "TType.STOP: break if fid == 1: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8',", "body=None,): self.statusCode = statusCode self.headers = headers self.body = body def read(self, iprot):", "1) oprot.writeString(self.arguments.encode('utf-8') if sys.version_info[0] == 2 else self.arguments) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self):", "fid == 6: if ftype == TType.STRING: self.email = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "(4, TType.MAP, 'parameters', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 4 (5,", "None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('App') if self.id", "if fid == 1: if ftype == TType.STRING: 
self.eventName = iprot.readString().decode('utf-8', errors='replace') if", "iprot.readI64() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.I64: self.categoryId =", "\"\"\" def __init__(self, level=None, message=None,): self.level = level self.message = message def read(self,", "\"\"\" def __init__(self, http=None, rpc=None,): self.http = http self.rpc = rpc def read(self,", "TType.I64: self.categoryId = iprot.readI64() else: iprot.skip(ftype) elif fid == 4: if ftype ==", "self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class Result(object):", "parameters self.body = body def read(self, iprot): if iprot._fast_decode is not None and", "oprot.writeFieldBegin('email', TType.STRING, 6) oprot.writeString(self.email.encode('utf-8') if sys.version_info[0] == 2 else self.email) oprot.writeFieldEnd() if self.points", "WHAT YOU ARE DOING # # options string: py # from thrift.Thrift import", "(1, TType.STRING, 'eventName', 'UTF8', None, ), # 1 (2, TType.STRING, 'data', 'UTF8', None,", "== other.__dict__ def __ne__(self, other): return not (self == other) class Response(object): \"\"\"", "Log.thrift_spec = ( None, # 0 (1, TType.STRING, 'level', 'UTF8', None, ), #", "= iprot.readListBegin() for _i54 in range(_size50): _elem55 = Event() _elem55.read(iprot) self.events.append(_elem55) iprot.readListEnd() else:", "oprot.writeListBegin(TType.STRUCT, len(self.events)) for iter62 in self.events: iter62.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.logs is not", "self.eventName = eventName self.data = data def read(self, iprot): if iprot._fast_decode is not", "if self.events is not None: oprot.writeFieldBegin('events', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.events)) for iter62 in", "2 ) all_structs.append(Execute) Execute.thrift_spec = ( None, # 0 (1, TType.STRING, 'action', 'UTF8',", "not (self == other) class App(object): \"\"\" Attributes: - id - userId -", "viter35) oprot.writeMapEnd() 
oprot.writeFieldEnd() if self.body is not None: oprot.writeFieldBegin('body', TType.STRING, 5) oprot.writeString(self.body.encode('utf-8') if", "ftype == TType.I64: self.categoryId = iprot.readI64() else: iprot.skip(ftype) elif fid == 4: if", "else iprot.readString() self.headers[_key14] = _val15 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 3: if", "oprot.writeFieldBegin('response', TType.STRUCT, 1) self.response.write(oprot) oprot.writeFieldEnd() if self.events is not None: oprot.writeFieldBegin('events', TType.LIST, 2)", "return not (self == other) class Action(object): \"\"\" Attributes: - name - code", "else self.code) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r'", "else self.eventName) oprot.writeFieldEnd() if self.data is not None: oprot.writeFieldBegin('data', TType.STRING, 2) oprot.writeString(self.data.encode('utf-8') if", "else self.message) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r'", "2) oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2 else self.message) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self):", "_key69 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() _val70 = iprot.readString().decode('utf-8',", "\"\"\" Attributes: - level - message \"\"\" def __init__(self, level=None, message=None,): self.level =", "oprot.writeFieldBegin('method', TType.STRING, 1) oprot.writeString(self.method.encode('utf-8') if sys.version_info[0] == 2 else self.method) oprot.writeFieldEnd() if self.headers", "not None: oprot.writeFieldBegin('data', TType.STRING, 2) oprot.writeString(self.data.encode('utf-8') if sys.version_info[0] == 2 else self.data) oprot.writeFieldEnd()", "(self == other) class HttpRequest(object): \"\"\" Attributes: - method - headers - uriFragments", "if 
sys.version_info[0] == 2 else iprot.readString() _val22 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Response') if self.statusCode is", "# 3 ) all_structs.append(Event) Event.thrift_spec = ( None, # 0 (1, TType.STRING, 'eventName',", "self.logs: iter63.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L =", "not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Event') if self.eventName is not None: oprot.writeFieldBegin('eventName',", "'name', 'UTF8', None, ), # 1 (2, TType.STRING, 'code', 'UTF8', None, ), #", "if ftype == TType.I64: self.categoryId = iprot.readI64() else: iprot.skip(ftype) elif fid == 4:", "self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__)", "if self.message is not None: oprot.writeFieldBegin('message', TType.STRING, 2) oprot.writeString(self.message.encode('utf-8') if sys.version_info[0] == 2", "oprot._fast_encode is not None and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return", "TType.STRING, 4) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name) oprot.writeFieldEnd() if self.url is", "and self.thrift_spec is not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('User') if self.id is", "self.email = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) elif", "and self.thrift_spec is not None: iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec]) 
return iprot.readStructBegin() while True:", "sys.version_info[0] == 2 else self.method) oprot.writeFieldEnd() if self.headers is not None: oprot.writeFieldBegin('headers', TType.MAP,", "False), None, ), # 2 (3, TType.MAP, 'uriFragments', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False),", "= iprot.readMapBegin() for _i13 in range(_size9): _key14 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "if sys.version_info[0] == 2 else iprot.readString() _val15 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "3: if ftype == TType.MAP: self.config = {} (_ktype1, _vtype2, _size0) = iprot.readMapBegin()", "oprot.writeFieldBegin('headers', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.headers)) for kiter30, viter31 in self.headers.items(): oprot.writeString(kiter30.encode('utf-8') if", "'points', None, None, ), # 7 ) all_structs.append(Result) Result.thrift_spec = ( None, #", "'context', [Context, None], None, ), # 3 ) all_structs.append(Request) Request.thrift_spec = ( None,", "_size42) = iprot.readListBegin() for _i46 in range(_size42): _elem47 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0]", "kiter7, viter8 in self.config.items(): oprot.writeString(kiter7.encode('utf-8') if sys.version_info[0] == 2 else kiter7) oprot.writeString(viter8.encode('utf-8') if", "== other.__dict__ def __ne__(self, other): return not (self == other) class Event(object): \"\"\"", "oprot.writeFieldEnd() if self.points is not None: oprot.writeFieldBegin('points', TType.I32, 7) oprot.writeI32(self.points) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd()", "TType.MAP, 'config', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 3 ) all_structs.append(Action)", "else iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.MAP: self.headers", "oprot.writeString(iter48.encode('utf-8') if sys.version_info[0] == 2 else iter48) 
oprot.writeListEnd() oprot.writeFieldEnd() if self.parameters is not", "= email self.points = points def read(self, iprot): if iprot._fast_decode is not None", "== 6: if ftype == TType.STRING: self.email = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "oprot.writeStructBegin('Connection') if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] ==", "Attributes: - name - type - config \"\"\" def __init__(self, name=None, type=None, config=None,):", "headers - body \"\"\" def __init__(self, statusCode=None, headers=None, body=None,): self.statusCode = statusCode self.headers", "2) oprot.writeString(self.baseUrl.encode('utf-8') if sys.version_info[0] == 2 else self.baseUrl) oprot.writeFieldEnd() if self.app is not", "# 7 ) all_structs.append(Result) Result.thrift_spec = ( None, # 0 (1, TType.STRUCT, 'response',", "oprot.writeFieldBegin('body', TType.STRING, 3) oprot.writeString(self.body.encode('utf-8') if sys.version_info[0] == 2 else self.body) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd()", "if ftype == TType.I64: self.routeId = iprot.readI64() else: iprot.skip(ftype) elif fid == 2:", "all_structs.append(HttpRequest) HttpRequest.thrift_spec = ( None, # 0 (1, TType.STRING, 'method', 'UTF8', None, ),", "other) class Action(object): \"\"\" Attributes: - name - code \"\"\" def __init__(self, name=None,", "other): return not (self == other) class RpcRequest(object): \"\"\" Attributes: - arguments \"\"\"", "not None: oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('HttpRequest') if self.method is not None: oprot.writeFieldBegin('method',", "iprot.readMapBegin() for _i68 in range(_size64): _key69 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2", "__init__(self, routeId=None, baseUrl=None, app=None, user=None,): self.routeId = routeId 
self.baseUrl = baseUrl self.app =", "kiter32, viter33 in self.uriFragments.items(): oprot.writeString(kiter32.encode('utf-8') if sys.version_info[0] == 2 else kiter32) oprot.writeString(viter33.encode('utf-8') if", "= ( None, # 0 (1, TType.STRING, 'eventName', 'UTF8', None, ), # 1", "errors='replace') if sys.version_info[0] == 2 else iprot.readString() self.headers[_key69] = _val70 iprot.readMapEnd() else: iprot.skip(ftype)", "ftype == TType.STRING: self.action = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString()", "else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.type = iprot.readString().decode('utf-8',", "== 2 else self.code) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L", "is not None: oprot.writeFieldBegin('userId', TType.I64, 2) oprot.writeI64(self.userId) oprot.writeFieldEnd() if self.status is not None:", "(self == other) class Connection(object): \"\"\" Attributes: - name - type - config", "- routeId - baseUrl - app - user \"\"\" def __init__(self, routeId=None, baseUrl=None,", "email - points \"\"\" def __init__(self, id=None, roleId=None, categoryId=None, status=None, name=None, email=None, points=None,):", "oprot.writeI64(self.id) oprot.writeFieldEnd() if self.roleId is not None: oprot.writeFieldBegin('roleId', TType.I64, 2) oprot.writeI64(self.roleId) oprot.writeFieldEnd() if", "(1, TType.STRING, 'level', 'UTF8', None, ), # 1 (2, TType.STRING, 'message', 'UTF8', None,", "= iprot.readMapBegin() for _i4 in range(_size0): _key5 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] ==", "self.code) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __repr__(self): L = ['%s=%r' %", "[] (_etype59, _size56) = iprot.readListBegin() for _i60 in range(_size56): _elem61 = Log() _elem61.read(iprot)", "oprot.writeStructBegin('Action') if self.name is 
not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] ==", "== 2 else iprot.readString() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot):", "if sys.version_info[0] == 2 else self.eventName) oprot.writeFieldEnd() if self.data is not None: oprot.writeFieldBegin('data',", "), # 2 (3, TType.I64, 'categoryId', None, None, ), # 3 (4, TType.I32,", "2 else iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRUCT:", "TType.STRING, 'UTF8', False), None, ), # 2 (3, TType.STRING, 'body', 'UTF8', None, ),", "return not (self == other) class Response(object): \"\"\" Attributes: - statusCode - headers", "if ftype == TType.STRING: self.level = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else", "oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec])) return oprot.writeStructBegin('Action') if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1)", "name self.code = code def read(self, iprot): if iprot._fast_decode is not None and", "None], None, ), # 1 (2, TType.LIST, 'events', (TType.STRUCT, [Event, None], False), None," ]
[ "x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\\\ n_t = \\tanh(W_{in} x_t", "i_r + h_r inputgate_tmp = i_i + h_i sigmoid = nn.Sigmoid() resetgate =", "x in torch.unbind(input_, dim=dim): # x dim is B, I hidden = self.cell(x,", "= sigmoid(resetgate_tmp) inputgate = sigmoid(inputgate_tmp) hr = self.h2h(hidden * resetgate) _, _, h_n", "bias=True, num_layers=1, batch_first=False, dropout=0.0): super().__init__() self.cell = MonotonicGruCell( input_size=input_size, hidden_size=hidden_size, bias=True) self.batch_first =", "std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std, std) def forward(self,", "h_n) hy = hidden + (1.-hidden) * inputgate * newgate return hy class", "import math device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") class MonotonicGruCell(nn.Module): def __init__(self,", "\\\\ h_t = (1 - z_t) * n_t + h_{(t-1)} \\end{array} \"\"\" self.input_size", "+ b_{hz}) \\\\ n_t = \\tanh(W_{in} x_t + b_{in} + (W_{hn}(r_t* h_{(t-1)})+ b_{hn}))", "B, 3H gh = self.h2h(hidden) # B, 3H i_r, i_i, i_n = gi.chunk(3,", "= self.h2h(hidden * resetgate) _, _, h_n = hr.chunk(3, 1) newgate = sigmoid(i_n", "For each element in the input sequence, each layer computes the following function:", "n_t = \\tanh(W_{in} x_t + b_{in} + (W_{hn}(r_t* h_{(t-1)})+ b_{hn})) \\\\ h_t =", "nn import math device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") class MonotonicGruCell(nn.Module): def", "Math \\begin{array}{ll} r_t = \\sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr})", "input_size, hidden_size, bias=True, num_layers=1, batch_first=False, dropout=0.0): super().__init__() self.cell = MonotonicGruCell( input_size=input_size, hidden_size=hidden_size, bias=True)", "MonotonicGruCell( input_size=input_size, hidden_size=hidden_size, bias=True) self.batch_first = batch_first def forward(self, input_, lengths, hidden=None): #", "self.batch_first = batch_first def forward(self, input_, lengths, hidden=None): # 
input_ is of dimensionalty", "self.input_size = input_size self.hidden_size = hidden_size self.i2h = nn.Linear(input_size, 3 * hidden_size, bias=bias)", "in torch.unbind(input_, dim=dim): # x dim is B, I hidden = self.cell(x, hidden)", "torch.nn as nn import math device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") class", "b_{hn})) \\\\ h_t = (1 - z_t) * n_t + h_{(t-1)} \\end{array} \"\"\"", "= input_size self.hidden_size = hidden_size self.i2h = nn.Linear(input_size, 3 * hidden_size, bias=bias) self.h2h", "super().__init__() \"\"\" For each element in the input sequence, each layer computes the", "3 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for", "def forward(self, input_, lengths, hidden=None): # input_ is of dimensionalty (T, B, input_size,", "i_n = gi.chunk(3, 1) h_r, h_i, h_n = gh.chunk(3, 1) resetgate_tmp = i_r", "batch_first=False, dropout=0.0): super().__init__() self.cell = MonotonicGruCell( input_size=input_size, hidden_size=hidden_size, bias=True) self.batch_first = batch_first def", "input_, lengths, hidden=None): # input_ is of dimensionalty (T, B, input_size, ...) #", "h_{(t-1)} + b_{hz}) \\\\ n_t = \\tanh(W_{in} x_t + b_{in} + (W_{hn}(r_t* h_{(t-1)})+", "input_size self.hidden_size = hidden_size self.i2h = nn.Linear(input_size, 3 * hidden_size, bias=bias) self.h2h =", "input_size if hidden is None: hidden = torch.zeros(x.size(0), self.hidden_size).to(device) gi = self.i2h(x) #", "+ h_i sigmoid = nn.Sigmoid() resetgate = sigmoid(resetgate_tmp) inputgate = sigmoid(inputgate_tmp) hr =", "is of dimensionalty (T, B, input_size, ...) 
# lenghths is B, dim =", "= [] for x in torch.unbind(input_, dim=dim): # x dim is B, I", "# x dim is B, I hidden = self.cell(x, hidden) outputs.append(hidden.clone()) hidden_states =", "+ W_{hz} h_{(t-1)} + b_{hz}) \\\\ n_t = \\tanh(W_{in} x_t + b_{in} +", "+ (1.-hidden) * inputgate * newgate return hy class MonotonicGru(nn.Module): def __init__(self, input_size,", "= \\sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\\\ n_t =", "each element in the input sequence, each layer computes the following function: MonotonicGru", "self.i2h(x) # B, 3H gh = self.h2h(hidden) # B, 3H i_r, i_i, i_n", "self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std,", "dimensionalty (T, B, input_size, ...) # lenghths is B, dim = 1 if", "newgate = sigmoid(i_n + h_n) hy = hidden + (1.-hidden) * inputgate *", "hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 3 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std", "* newgate return hy class MonotonicGru(nn.Module): def __init__(self, input_size, hidden_size, bias=True, num_layers=1, batch_first=False,", "r_t = \\sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\\\ z_t", "(1 - z_t) * n_t + h_{(t-1)} \\end{array} \"\"\" self.input_size = input_size self.hidden_size", "= gi.chunk(3, 1) h_r, h_i, h_n = gh.chunk(3, 1) resetgate_tmp = i_r +", "hr = self.h2h(hidden * resetgate) _, _, h_n = hr.chunk(3, 1) newgate =", "input_size=input_size, hidden_size=hidden_size, bias=True) self.batch_first = batch_first def forward(self, input_, lengths, hidden=None): # input_", "class MonotonicGru(nn.Module): def __init__(self, input_size, hidden_size, bias=True, num_layers=1, batch_first=False, dropout=0.0): super().__init__() self.cell =", "= torch.stack(outputs) # T, B, H last_states = [] for idx, l in", "# x is B, input_size if hidden is None: hidden = torch.zeros(x.size(0), self.hidden_size).to(device)", "= [] for idx, 
l in enumerate(lengths): last_states.append(hidden_states[l-1, idx, :]) last_states = torch.stack(last_states)", "sigmoid(inputgate_tmp) hr = self.h2h(hidden * resetgate) _, _, h_n = hr.chunk(3, 1) newgate", "= gh.chunk(3, 1) resetgate_tmp = i_r + h_r inputgate_tmp = i_i + h_i", "* resetgate) _, _, h_n = hr.chunk(3, 1) newgate = sigmoid(i_n + h_n)", "newgate return hy class MonotonicGru(nn.Module): def __init__(self, input_size, hidden_size, bias=True, num_layers=1, batch_first=False, dropout=0.0):", "1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std, std) def forward(self, x, hidden=None):", "# T, B, H last_states = [] for idx, l in enumerate(lengths): last_states.append(hidden_states[l-1,", "hidden + (1.-hidden) * inputgate * newgate return hy class MonotonicGru(nn.Module): def __init__(self,", "= MonotonicGruCell( input_size=input_size, hidden_size=hidden_size, bias=True) self.batch_first = batch_first def forward(self, input_, lengths, hidden=None):", "sigmoid = nn.Sigmoid() resetgate = sigmoid(resetgate_tmp) inputgate = sigmoid(inputgate_tmp) hr = self.h2h(hidden *", "b_{in} + (W_{hn}(r_t* h_{(t-1)})+ b_{hn})) \\\\ h_t = (1 - z_t) * n_t", "std) def forward(self, x, hidden=None): # x is B, input_size if hidden is", "__init__(self, input_size, hidden_size, bias=True): super().__init__() \"\"\" For each element in the input sequence,", "B, dim = 1 if self.batch_first else 0 outputs = [] for x", "1) resetgate_tmp = i_r + h_r inputgate_tmp = i_i + h_i sigmoid =", "hidden = torch.zeros(x.size(0), self.hidden_size).to(device) gi = self.i2h(x) # B, 3H gh = self.h2h(hidden)", "self.hidden_size = hidden_size self.i2h = nn.Linear(input_size, 3 * hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size,", "h_i sigmoid = nn.Sigmoid() resetgate = sigmoid(resetgate_tmp) inputgate = sigmoid(inputgate_tmp) hr = self.h2h(hidden", "b_{hr}) \\\\ z_t = \\sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz})", "hidden_size self.i2h = 
nn.Linear(input_size, 3 * hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 3 *", "* hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w", "computes the following function: MonotonicGru Math \\begin{array}{ll} r_t = \\sigma(W_{ir} x_t + b_{ir}", "+ h_r inputgate_tmp = i_i + h_i sigmoid = nn.Sigmoid() resetgate = sigmoid(resetgate_tmp)", "nn.Linear(hidden_size, 3 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size)", "super().__init__() self.cell = MonotonicGruCell( input_size=input_size, hidden_size=hidden_size, bias=True) self.batch_first = batch_first def forward(self, input_,", "h_{(t-1)} + b_{hr}) \\\\ z_t = \\sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)}", "torch.zeros(x.size(0), self.hidden_size).to(device) gi = self.i2h(x) # B, 3H gh = self.h2h(hidden) # B,", "H last_states = [] for idx, l in enumerate(lengths): last_states.append(hidden_states[l-1, idx, :]) last_states", "= torch.zeros(x.size(0), self.hidden_size).to(device) gi = self.i2h(x) # B, 3H gh = self.h2h(hidden) #", "h_r inputgate_tmp = i_i + h_i sigmoid = nn.Sigmoid() resetgate = sigmoid(resetgate_tmp) inputgate", "+ (W_{hn}(r_t* h_{(t-1)})+ b_{hn})) \\\\ h_t = (1 - z_t) * n_t +", "B, 3H i_r, i_i, i_n = gi.chunk(3, 1) h_r, h_i, h_n = gh.chunk(3,", "the input sequence, each layer computes the following function: MonotonicGru Math \\begin{array}{ll} r_t", "+ h_{(t-1)} \\end{array} \"\"\" self.input_size = input_size self.hidden_size = hidden_size self.i2h = nn.Linear(input_size,", "reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std, std) def", "is B, input_size if hidden is None: hidden = torch.zeros(x.size(0), self.hidden_size).to(device) gi =", "T, B, H last_states = [] for idx, l in enumerate(lengths): last_states.append(hidden_states[l-1, idx,", "1 if self.batch_first else 0 outputs = [] for x 
in torch.unbind(input_, dim=dim):", "0 outputs = [] for x in torch.unbind(input_, dim=dim): # x dim is", "3 * hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 3 * hidden_size, bias=bias) self.reset_parameters() def", "hy class MonotonicGru(nn.Module): def __init__(self, input_size, hidden_size, bias=True, num_layers=1, batch_first=False, dropout=0.0): super().__init__() self.cell", "* inputgate * newgate return hy class MonotonicGru(nn.Module): def __init__(self, input_size, hidden_size, bias=True,", "= (1 - z_t) * n_t + h_{(t-1)} \\end{array} \"\"\" self.input_size = input_size", "dim = 1 if self.batch_first else 0 outputs = [] for x in", "self.cell(x, hidden) outputs.append(hidden.clone()) hidden_states = torch.stack(outputs) # T, B, H last_states = []", "hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in", "= hr.chunk(3, 1) newgate = sigmoid(i_n + h_n) hy = hidden + (1.-hidden)", "= self.h2h(hidden) # B, 3H i_r, i_i, i_n = gi.chunk(3, 1) h_r, h_i,", "MonotonicGru Math \\begin{array}{ll} r_t = \\sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} +", "x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\\\ z_t = \\sigma(W_{iz} x_t", "i_i, i_n = gi.chunk(3, 1) h_r, h_i, h_n = gh.chunk(3, 1) resetgate_tmp =", "self.batch_first else 0 outputs = [] for x in torch.unbind(input_, dim=dim): # x", "__init__(self, input_size, hidden_size, bias=True, num_layers=1, batch_first=False, dropout=0.0): super().__init__() self.cell = MonotonicGruCell( input_size=input_size, hidden_size=hidden_size,", "input_size, ...) # lenghths is B, dim = 1 if self.batch_first else 0", "+ b_{in} + (W_{hn}(r_t* h_{(t-1)})+ b_{hn})) \\\\ h_t = (1 - z_t) *", "...) 
# lenghths is B, dim = 1 if self.batch_first else 0 outputs", "following function: MonotonicGru Math \\begin{array}{ll} r_t = \\sigma(W_{ir} x_t + b_{ir} + W_{hr}", "def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std, std)", "3H i_r, i_i, i_n = gi.chunk(3, 1) h_r, h_i, h_n = gh.chunk(3, 1)", "= self.cell(x, hidden) outputs.append(hidden.clone()) hidden_states = torch.stack(outputs) # T, B, H last_states =", "\"cpu\") class MonotonicGruCell(nn.Module): def __init__(self, input_size, hidden_size, bias=True): super().__init__() \"\"\" For each element", "= hidden + (1.-hidden) * inputgate * newgate return hy class MonotonicGru(nn.Module): def", "= \\tanh(W_{in} x_t + b_{in} + (W_{hn}(r_t* h_{(t-1)})+ b_{hn})) \\\\ h_t = (1", "def __init__(self, input_size, hidden_size, bias=True, num_layers=1, batch_first=False, dropout=0.0): super().__init__() self.cell = MonotonicGruCell( input_size=input_size,", "hy = hidden + (1.-hidden) * inputgate * newgate return hy class MonotonicGru(nn.Module):", "outputs = [] for x in torch.unbind(input_, dim=dim): # x dim is B,", "h_{(t-1)})+ b_{hn})) \\\\ h_t = (1 - z_t) * n_t + h_{(t-1)} \\end{array}", "bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 / math.sqrt(self.hidden_size) for w in self.parameters():", "input_size, hidden_size, bias=True): super().__init__() \"\"\" For each element in the input sequence, each", "h_n = gh.chunk(3, 1) resetgate_tmp = i_r + h_r inputgate_tmp = i_i +", "resetgate) _, _, h_n = hr.chunk(3, 1) newgate = sigmoid(i_n + h_n) hy", "# input_ is of dimensionalty (T, B, input_size, ...) 
# lenghths is B,", "torch.stack(outputs) # T, B, H last_states = [] for idx, l in enumerate(lengths):", "the following function: MonotonicGru Math \\begin{array}{ll} r_t = \\sigma(W_{ir} x_t + b_{ir} +", "= sigmoid(i_n + h_n) hy = hidden + (1.-hidden) * inputgate * newgate", "math device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") class MonotonicGruCell(nn.Module): def __init__(self, input_size,", "* n_t + h_{(t-1)} \\end{array} \"\"\" self.input_size = input_size self.hidden_size = hidden_size self.i2h", "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") class MonotonicGruCell(nn.Module): def __init__(self, input_size, hidden_size,", "else \"cpu\") class MonotonicGruCell(nn.Module): def __init__(self, input_size, hidden_size, bias=True): super().__init__() \"\"\" For each", "= 1.0 / math.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std, std) def forward(self, x,", "inputgate = sigmoid(inputgate_tmp) hr = self.h2h(hidden * resetgate) _, _, h_n = hr.chunk(3,", "None: hidden = torch.zeros(x.size(0), self.hidden_size).to(device) gi = self.i2h(x) # B, 3H gh =", "is None: hidden = torch.zeros(x.size(0), self.hidden_size).to(device) gi = self.i2h(x) # B, 3H gh", "input_ is of dimensionalty (T, B, input_size, ...) 
# lenghths is B, dim", "x_t + b_{in} + (W_{hn}(r_t* h_{(t-1)})+ b_{hn})) \\\\ h_t = (1 - z_t)", "B, input_size if hidden is None: hidden = torch.zeros(x.size(0), self.hidden_size).to(device) gi = self.i2h(x)", "\\\\ z_t = \\sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\\\", "layer computes the following function: MonotonicGru Math \\begin{array}{ll} r_t = \\sigma(W_{ir} x_t +", "import torch import torch.nn as nn import math device = torch.device(\"cuda\" if torch.cuda.is_available()", "forward(self, x, hidden=None): # x is B, input_size if hidden is None: hidden", "= self.i2h(x) # B, 3H gh = self.h2h(hidden) # B, 3H i_r, i_i,", "if self.batch_first else 0 outputs = [] for x in torch.unbind(input_, dim=dim): #", "gi = self.i2h(x) # B, 3H gh = self.h2h(hidden) # B, 3H i_r,", "self.h2h(hidden) # B, 3H i_r, i_i, i_n = gi.chunk(3, 1) h_r, h_i, h_n", "torch import torch.nn as nn import math device = torch.device(\"cuda\" if torch.cuda.is_available() else", "(W_{hn}(r_t* h_{(t-1)})+ b_{hn})) \\\\ h_t = (1 - z_t) * n_t + h_{(t-1)}", "batch_first def forward(self, input_, lengths, hidden=None): # input_ is of dimensionalty (T, B,", "\"\"\" For each element in the input sequence, each layer computes the following", "hidden_size, bias=True, num_layers=1, batch_first=False, dropout=0.0): super().__init__() self.cell = MonotonicGruCell( input_size=input_size, hidden_size=hidden_size, bias=True) self.batch_first", "self.i2h = nn.Linear(input_size, 3 * hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 3 * hidden_size,", "w.data.uniform_(-std, std) def forward(self, x, hidden=None): # x is B, input_size if hidden", "num_layers=1, batch_first=False, dropout=0.0): super().__init__() self.cell = MonotonicGruCell( input_size=input_size, hidden_size=hidden_size, bias=True) self.batch_first = batch_first", "[] for idx, l in enumerate(lengths): last_states.append(hidden_states[l-1, idx, :]) last_states = torch.stack(last_states) return", "lengths, hidden=None): # 
input_ is of dimensionalty (T, B, input_size, ...) # lenghths", "element in the input sequence, each layer computes the following function: MonotonicGru Math", "/ math.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std, std) def forward(self, x, hidden=None): #", "W_{hz} h_{(t-1)} + b_{hz}) \\\\ n_t = \\tanh(W_{in} x_t + b_{in} + (W_{hn}(r_t*", "bias=True): super().__init__() \"\"\" For each element in the input sequence, each layer computes", "i_i + h_i sigmoid = nn.Sigmoid() resetgate = sigmoid(resetgate_tmp) inputgate = sigmoid(inputgate_tmp) hr", "hidden=None): # input_ is of dimensionalty (T, B, input_size, ...) # lenghths is", "(T, B, input_size, ...) # lenghths is B, dim = 1 if self.batch_first", "import torch.nn as nn import math device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")", "hidden_states = torch.stack(outputs) # T, B, H last_states = [] for idx, l", "dim=dim): # x dim is B, I hidden = self.cell(x, hidden) outputs.append(hidden.clone()) hidden_states", "_, _, h_n = hr.chunk(3, 1) newgate = sigmoid(i_n + h_n) hy =", "in self.parameters(): w.data.uniform_(-std, std) def forward(self, x, hidden=None): # x is B, input_size", "for w in self.parameters(): w.data.uniform_(-std, std) def forward(self, x, hidden=None): # x is", "\\begin{array}{ll} r_t = \\sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\\\", "w in self.parameters(): w.data.uniform_(-std, std) def forward(self, x, hidden=None): # x is B,", "bias=bias) self.h2h = nn.Linear(hidden_size, 3 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std =", "else 0 outputs = [] for x in torch.unbind(input_, dim=dim): # x dim", "\\sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\\\ z_t = \\sigma(W_{iz}", "= hidden_size self.i2h = nn.Linear(input_size, 3 * hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 3", "class MonotonicGruCell(nn.Module): def __init__(self, input_size, hidden_size, bias=True): 
super().__init__() \"\"\" For each element in", "# B, 3H i_r, i_i, i_n = gi.chunk(3, 1) h_r, h_i, h_n =", "hidden=None): # x is B, input_size if hidden is None: hidden = torch.zeros(x.size(0),", "MonotonicGruCell(nn.Module): def __init__(self, input_size, hidden_size, bias=True): super().__init__() \"\"\" For each element in the", "hidden_size, bias=True): super().__init__() \"\"\" For each element in the input sequence, each layer", "x is B, input_size if hidden is None: hidden = torch.zeros(x.size(0), self.hidden_size).to(device) gi", "hr.chunk(3, 1) newgate = sigmoid(i_n + h_n) hy = hidden + (1.-hidden) *", "hidden = self.cell(x, hidden) outputs.append(hidden.clone()) hidden_states = torch.stack(outputs) # T, B, H last_states", "nn.Linear(input_size, 3 * hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 3 * hidden_size, bias=bias) self.reset_parameters()", "B, input_size, ...) # lenghths is B, dim = 1 if self.batch_first else", "function: MonotonicGru Math \\begin{array}{ll} r_t = \\sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)}", "- z_t) * n_t + h_{(t-1)} \\end{array} \"\"\" self.input_size = input_size self.hidden_size =", "lenghths is B, dim = 1 if self.batch_first else 0 outputs = []", "if torch.cuda.is_available() else \"cpu\") class MonotonicGruCell(nn.Module): def __init__(self, input_size, hidden_size, bias=True): super().__init__() \"\"\"", "b_{hz}) \\\\ n_t = \\tanh(W_{in} x_t + b_{in} + (W_{hn}(r_t* h_{(t-1)})+ b_{hn})) \\\\", "= nn.Linear(input_size, 3 * hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 3 * hidden_size, bias=bias)", "resetgate_tmp = i_r + h_r inputgate_tmp = i_i + h_i sigmoid = nn.Sigmoid()", "as nn import math device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") class MonotonicGruCell(nn.Module):", "h_r, h_i, h_n = gh.chunk(3, 1) resetgate_tmp = i_r + h_r inputgate_tmp =", "def forward(self, x, hidden=None): # x is B, input_size if hidden is None:", "1) newgate = sigmoid(i_n + h_n) hy = hidden + 
(1.-hidden) * inputgate", "dim is B, I hidden = self.cell(x, hidden) outputs.append(hidden.clone()) hidden_states = torch.stack(outputs) #", "= nn.Linear(hidden_size, 3 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0 /", "n_t + h_{(t-1)} \\end{array} \"\"\" self.input_size = input_size self.hidden_size = hidden_size self.i2h =", "= \\sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\\\ z_t =", "self.parameters(): w.data.uniform_(-std, std) def forward(self, x, hidden=None): # x is B, input_size if", "for idx, l in enumerate(lengths): last_states.append(hidden_states[l-1, idx, :]) last_states = torch.stack(last_states) return hidden_states,", "z_t) * n_t + h_{(t-1)} \\end{array} \"\"\" self.input_size = input_size self.hidden_size = hidden_size", "+ h_n) hy = hidden + (1.-hidden) * inputgate * newgate return hy", "# B, 3H gh = self.h2h(hidden) # B, 3H i_r, i_i, i_n =", "is B, dim = 1 if self.batch_first else 0 outputs = [] for", "resetgate = sigmoid(resetgate_tmp) inputgate = sigmoid(inputgate_tmp) hr = self.h2h(hidden * resetgate) _, _,", "self.h2h(hidden * resetgate) _, _, h_n = hr.chunk(3, 1) newgate = sigmoid(i_n +", "= sigmoid(inputgate_tmp) hr = self.h2h(hidden * resetgate) _, _, h_n = hr.chunk(3, 1)", "x dim is B, I hidden = self.cell(x, hidden) outputs.append(hidden.clone()) hidden_states = torch.stack(outputs)", "+ b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\\\ n_t = \\tanh(W_{in} x_t +", "x, hidden=None): # x is B, input_size if hidden is None: hidden =", "for x in torch.unbind(input_, dim=dim): # x dim is B, I hidden =", "= batch_first def forward(self, input_, lengths, hidden=None): # input_ is of dimensionalty (T,", "= i_i + h_i sigmoid = nn.Sigmoid() resetgate = sigmoid(resetgate_tmp) inputgate = sigmoid(inputgate_tmp)", "+ b_{hr}) \\\\ z_t = \\sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} +", "gh = self.h2h(hidden) # B, 3H i_r, i_i, i_n = gi.chunk(3, 1) h_r,", "B, H last_states = [] for idx, l in 
enumerate(lengths): last_states.append(hidden_states[l-1, idx, :])", "if hidden is None: hidden = torch.zeros(x.size(0), self.hidden_size).to(device) gi = self.i2h(x) # B,", "in the input sequence, each layer computes the following function: MonotonicGru Math \\begin{array}{ll}", "self.hidden_size).to(device) gi = self.i2h(x) # B, 3H gh = self.h2h(hidden) # B, 3H", "last_states = [] for idx, l in enumerate(lengths): last_states.append(hidden_states[l-1, idx, :]) last_states =", "\\\\ n_t = \\tanh(W_{in} x_t + b_{in} + (W_{hn}(r_t* h_{(t-1)})+ b_{hn})) \\\\ h_t", "of dimensionalty (T, B, input_size, ...) # lenghths is B, dim = 1", "W_{hr} h_{(t-1)} + b_{hr}) \\\\ z_t = \\sigma(W_{iz} x_t + b_{iz} + W_{hz}", "torch.cuda.is_available() else \"cpu\") class MonotonicGruCell(nn.Module): def __init__(self, input_size, hidden_size, bias=True): super().__init__() \"\"\" For", "* hidden_size, bias=bias) self.h2h = nn.Linear(hidden_size, 3 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self):", "def __init__(self, input_size, hidden_size, bias=True): super().__init__() \"\"\" For each element in the input", "gi.chunk(3, 1) h_r, h_i, h_n = gh.chunk(3, 1) resetgate_tmp = i_r + h_r", "torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") class MonotonicGruCell(nn.Module): def __init__(self, input_size, hidden_size, bias=True): super().__init__()", "h_i, h_n = gh.chunk(3, 1) resetgate_tmp = i_r + h_r inputgate_tmp = i_i", "b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\\\ z_t = \\sigma(W_{iz} x_t + b_{iz}", "[] for x in torch.unbind(input_, dim=dim): # x dim is B, I hidden", "inputgate_tmp = i_i + h_i sigmoid = nn.Sigmoid() resetgate = sigmoid(resetgate_tmp) inputgate =", "nn.Sigmoid() resetgate = sigmoid(resetgate_tmp) inputgate = sigmoid(inputgate_tmp) hr = self.h2h(hidden * resetgate) _,", "sequence, each layer computes the following function: MonotonicGru Math \\begin{array}{ll} r_t = \\sigma(W_{ir}", "= i_r + h_r inputgate_tmp = i_i + h_i sigmoid = 
nn.Sigmoid() resetgate", "\"\"\" self.input_size = input_size self.hidden_size = hidden_size self.i2h = nn.Linear(input_size, 3 * hidden_size,", "sigmoid(resetgate_tmp) inputgate = sigmoid(inputgate_tmp) hr = self.h2h(hidden * resetgate) _, _, h_n =", "self.cell = MonotonicGruCell( input_size=input_size, hidden_size=hidden_size, bias=True) self.batch_first = batch_first def forward(self, input_, lengths,", "h_{(t-1)} \\end{array} \"\"\" self.input_size = input_size self.hidden_size = hidden_size self.i2h = nn.Linear(input_size, 3", "math.sqrt(self.hidden_size) for w in self.parameters(): w.data.uniform_(-std, std) def forward(self, x, hidden=None): # x", "return hy class MonotonicGru(nn.Module): def __init__(self, input_size, hidden_size, bias=True, num_layers=1, batch_first=False, dropout=0.0): super().__init__()", "= 1 if self.batch_first else 0 outputs = [] for x in torch.unbind(input_,", "hidden_size=hidden_size, bias=True) self.batch_first = batch_first def forward(self, input_, lengths, hidden=None): # input_ is", "torch.unbind(input_, dim=dim): # x dim is B, I hidden = self.cell(x, hidden) outputs.append(hidden.clone())", "inputgate * newgate return hy class MonotonicGru(nn.Module): def __init__(self, input_size, hidden_size, bias=True, num_layers=1,", "\\sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\\\ n_t = \\tanh(W_{in}", "z_t = \\sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\\\ n_t", "= torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") class MonotonicGruCell(nn.Module): def __init__(self, input_size, hidden_size, bias=True):", "input sequence, each layer computes the following function: MonotonicGru Math \\begin{array}{ll} r_t =", "dropout=0.0): super().__init__() self.cell = MonotonicGruCell( input_size=input_size, hidden_size=hidden_size, bias=True) self.batch_first = batch_first def forward(self,", "+ b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\\\ z_t = \\sigma(W_{iz} x_t +", "(1.-hidden) * inputgate * newgate return hy 
class MonotonicGru(nn.Module): def __init__(self, input_size, hidden_size,", "idx, l in enumerate(lengths): last_states.append(hidden_states[l-1, idx, :]) last_states = torch.stack(last_states) return hidden_states, last_states", "+ W_{hr} h_{(t-1)} + b_{hr}) \\\\ z_t = \\sigma(W_{iz} x_t + b_{iz} +", "sigmoid(i_n + h_n) hy = hidden + (1.-hidden) * inputgate * newgate return", "h_t = (1 - z_t) * n_t + h_{(t-1)} \\end{array} \"\"\" self.input_size =", "h_n = hr.chunk(3, 1) newgate = sigmoid(i_n + h_n) hy = hidden +", "forward(self, input_, lengths, hidden=None): # input_ is of dimensionalty (T, B, input_size, ...)", "B, I hidden = self.cell(x, hidden) outputs.append(hidden.clone()) hidden_states = torch.stack(outputs) # T, B,", "outputs.append(hidden.clone()) hidden_states = torch.stack(outputs) # T, B, H last_states = [] for idx,", "1) h_r, h_i, h_n = gh.chunk(3, 1) resetgate_tmp = i_r + h_r inputgate_tmp", "MonotonicGru(nn.Module): def __init__(self, input_size, hidden_size, bias=True, num_layers=1, batch_first=False, dropout=0.0): super().__init__() self.cell = MonotonicGruCell(", "# lenghths is B, dim = 1 if self.batch_first else 0 outputs =", "self.h2h = nn.Linear(hidden_size, 3 * hidden_size, bias=bias) self.reset_parameters() def reset_parameters(self): std = 1.0", "hidden) outputs.append(hidden.clone()) hidden_states = torch.stack(outputs) # T, B, H last_states = [] for", "bias=True) self.batch_first = batch_first def forward(self, input_, lengths, hidden=None): # input_ is of", "\\tanh(W_{in} x_t + b_{in} + (W_{hn}(r_t* h_{(t-1)})+ b_{hn})) \\\\ h_t = (1 -", "each layer computes the following function: MonotonicGru Math \\begin{array}{ll} r_t = \\sigma(W_{ir} x_t", "hidden is None: hidden = torch.zeros(x.size(0), self.hidden_size).to(device) gi = self.i2h(x) # B, 3H", "3H gh = self.h2h(hidden) # B, 3H i_r, i_i, i_n = gi.chunk(3, 1)", "_, h_n = hr.chunk(3, 1) newgate = sigmoid(i_n + h_n) hy = hidden", "\\end{array} \"\"\" self.input_size = input_size 
self.hidden_size = hidden_size self.i2h = nn.Linear(input_size, 3 *", "gh.chunk(3, 1) resetgate_tmp = i_r + h_r inputgate_tmp = i_i + h_i sigmoid", "i_r, i_i, i_n = gi.chunk(3, 1) h_r, h_i, h_n = gh.chunk(3, 1) resetgate_tmp", "is B, I hidden = self.cell(x, hidden) outputs.append(hidden.clone()) hidden_states = torch.stack(outputs) # T,", "I hidden = self.cell(x, hidden) outputs.append(hidden.clone()) hidden_states = torch.stack(outputs) # T, B, H", "= nn.Sigmoid() resetgate = sigmoid(resetgate_tmp) inputgate = sigmoid(inputgate_tmp) hr = self.h2h(hidden * resetgate)", "b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\\\ n_t = \\tanh(W_{in} x_t + b_{in}" ]
[ "event evert hour, but with cron schedule = Schedule( actor=instance, verb=EVERY_HOUR, minute=instance.created.minute, hour=\"*/1\"", "datetime from django.db.models.signals import post_save from django.dispatch import receiver from django_celery_beat.models import DAYS", "from django.db.models.signals import post_save from django.dispatch import receiver from django_celery_beat.models import DAYS from", "schedule = Schedule( actor=instance, verb=DUMMY_EVENT, limit=1, every=2, period=DAYS, start_time=instance.created + datetime.timedelta(days=2), ) schedule.save()", "DAYS from snitch.schedules.models import Schedule from tests.app.events import DUMMY_EVENT, EVERY_HOUR from tests.app.models import", "django.dispatch import receiver from django_celery_beat.models import DAYS from snitch.schedules.models import Schedule from tests.app.events", "dummy event in 2 days schedule = Schedule( actor=instance, verb=DUMMY_EVENT, limit=1, every=2, period=DAYS,", "# sent a dummy event evert hour, but with cron schedule = Schedule(", "schedules for other stuff.\"\"\" if created: # sent a dummy event in 2", "created: # sent a dummy event in 2 days schedule = Schedule( actor=instance,", "limit=1, every=2, period=DAYS, start_time=instance.created + datetime.timedelta(days=2), ) schedule.save() # sent a dummy event", "every=2, period=DAYS, start_time=instance.created + datetime.timedelta(days=2), ) schedule.save() # sent a dummy event evert", "schedule.save() # sent a dummy event evert hour, but with cron schedule =", "from tests.app.events import DUMMY_EVENT, EVERY_HOUR from tests.app.models import OtherStuff @receiver(post_save, sender=OtherStuff) def post_save_other_stuff(sender,", "sent a dummy event evert hour, but with cron schedule = Schedule( actor=instance,", "@receiver(post_save, sender=OtherStuff) def post_save_other_stuff(sender, instance, created, **kwargs): \"\"\"Creates the schedules for other stuff.\"\"\"", "from snitch.schedules.models import Schedule from 
tests.app.events import DUMMY_EVENT, EVERY_HOUR from tests.app.models import OtherStuff", "**kwargs): \"\"\"Creates the schedules for other stuff.\"\"\" if created: # sent a dummy", "for other stuff.\"\"\" if created: # sent a dummy event in 2 days", "Schedule( actor=instance, verb=DUMMY_EVENT, limit=1, every=2, period=DAYS, start_time=instance.created + datetime.timedelta(days=2), ) schedule.save() # sent", "post_save from django.dispatch import receiver from django_celery_beat.models import DAYS from snitch.schedules.models import Schedule", "OtherStuff @receiver(post_save, sender=OtherStuff) def post_save_other_stuff(sender, instance, created, **kwargs): \"\"\"Creates the schedules for other", "= Schedule( actor=instance, verb=DUMMY_EVENT, limit=1, every=2, period=DAYS, start_time=instance.created + datetime.timedelta(days=2), ) schedule.save() #", "a dummy event evert hour, but with cron schedule = Schedule( actor=instance, verb=EVERY_HOUR,", "tests.app.events import DUMMY_EVENT, EVERY_HOUR from tests.app.models import OtherStuff @receiver(post_save, sender=OtherStuff) def post_save_other_stuff(sender, instance,", "def post_save_other_stuff(sender, instance, created, **kwargs): \"\"\"Creates the schedules for other stuff.\"\"\" if created:", "other stuff.\"\"\" if created: # sent a dummy event in 2 days schedule", "dummy event evert hour, but with cron schedule = Schedule( actor=instance, verb=EVERY_HOUR, minute=instance.created.minute,", "import DAYS from snitch.schedules.models import Schedule from tests.app.events import DUMMY_EVENT, EVERY_HOUR from tests.app.models", "sender=OtherStuff) def post_save_other_stuff(sender, instance, created, **kwargs): \"\"\"Creates the schedules for other stuff.\"\"\" if", "receiver from django_celery_beat.models import DAYS from snitch.schedules.models import Schedule from tests.app.events import DUMMY_EVENT,", "django_celery_beat.models import DAYS from snitch.schedules.models import Schedule from tests.app.events import 
DUMMY_EVENT, EVERY_HOUR from", "created, **kwargs): \"\"\"Creates the schedules for other stuff.\"\"\" if created: # sent a", "\"\"\"Creates the schedules for other stuff.\"\"\" if created: # sent a dummy event", "snitch.schedules.models import Schedule from tests.app.events import DUMMY_EVENT, EVERY_HOUR from tests.app.models import OtherStuff @receiver(post_save,", "import OtherStuff @receiver(post_save, sender=OtherStuff) def post_save_other_stuff(sender, instance, created, **kwargs): \"\"\"Creates the schedules for", "2 days schedule = Schedule( actor=instance, verb=DUMMY_EVENT, limit=1, every=2, period=DAYS, start_time=instance.created + datetime.timedelta(days=2),", "actor=instance, verb=DUMMY_EVENT, limit=1, every=2, period=DAYS, start_time=instance.created + datetime.timedelta(days=2), ) schedule.save() # sent a", "import receiver from django_celery_beat.models import DAYS from snitch.schedules.models import Schedule from tests.app.events import", "start_time=instance.created + datetime.timedelta(days=2), ) schedule.save() # sent a dummy event evert hour, but", "verb=DUMMY_EVENT, limit=1, every=2, period=DAYS, start_time=instance.created + datetime.timedelta(days=2), ) schedule.save() # sent a dummy", "# sent a dummy event in 2 days schedule = Schedule( actor=instance, verb=DUMMY_EVENT,", "DUMMY_EVENT, EVERY_HOUR from tests.app.models import OtherStuff @receiver(post_save, sender=OtherStuff) def post_save_other_stuff(sender, instance, created, **kwargs):", "<gh_stars>10-100 import datetime from django.db.models.signals import post_save from django.dispatch import receiver from django_celery_beat.models", "period=DAYS, start_time=instance.created + datetime.timedelta(days=2), ) schedule.save() # sent a dummy event evert hour,", "in 2 days schedule = Schedule( actor=instance, verb=DUMMY_EVENT, limit=1, every=2, period=DAYS, start_time=instance.created +", "from django.dispatch import receiver from django_celery_beat.models import DAYS from 
snitch.schedules.models import Schedule from", ") schedule.save() # sent a dummy event evert hour, but with cron schedule", "django.db.models.signals import post_save from django.dispatch import receiver from django_celery_beat.models import DAYS from snitch.schedules.models", "import post_save from django.dispatch import receiver from django_celery_beat.models import DAYS from snitch.schedules.models import", "sent a dummy event in 2 days schedule = Schedule( actor=instance, verb=DUMMY_EVENT, limit=1,", "days schedule = Schedule( actor=instance, verb=DUMMY_EVENT, limit=1, every=2, period=DAYS, start_time=instance.created + datetime.timedelta(days=2), )", "a dummy event in 2 days schedule = Schedule( actor=instance, verb=DUMMY_EVENT, limit=1, every=2,", "import datetime from django.db.models.signals import post_save from django.dispatch import receiver from django_celery_beat.models import", "+ datetime.timedelta(days=2), ) schedule.save() # sent a dummy event evert hour, but with", "datetime.timedelta(days=2), ) schedule.save() # sent a dummy event evert hour, but with cron", "EVERY_HOUR from tests.app.models import OtherStuff @receiver(post_save, sender=OtherStuff) def post_save_other_stuff(sender, instance, created, **kwargs): \"\"\"Creates", "evert hour, but with cron schedule = Schedule( actor=instance, verb=EVERY_HOUR, minute=instance.created.minute, hour=\"*/1\" )", "import Schedule from tests.app.events import DUMMY_EVENT, EVERY_HOUR from tests.app.models import OtherStuff @receiver(post_save, sender=OtherStuff)", "instance, created, **kwargs): \"\"\"Creates the schedules for other stuff.\"\"\" if created: # sent", "tests.app.models import OtherStuff @receiver(post_save, sender=OtherStuff) def post_save_other_stuff(sender, instance, created, **kwargs): \"\"\"Creates the schedules", "the schedules for other stuff.\"\"\" if created: # sent a dummy event in", "stuff.\"\"\" if created: # sent a dummy event in 2 days schedule =", "from django_celery_beat.models 
import DAYS from snitch.schedules.models import Schedule from tests.app.events import DUMMY_EVENT, EVERY_HOUR", "hour, but with cron schedule = Schedule( actor=instance, verb=EVERY_HOUR, minute=instance.created.minute, hour=\"*/1\" ) schedule.save()", "import DUMMY_EVENT, EVERY_HOUR from tests.app.models import OtherStuff @receiver(post_save, sender=OtherStuff) def post_save_other_stuff(sender, instance, created,", "if created: # sent a dummy event in 2 days schedule = Schedule(", "from tests.app.models import OtherStuff @receiver(post_save, sender=OtherStuff) def post_save_other_stuff(sender, instance, created, **kwargs): \"\"\"Creates the", "post_save_other_stuff(sender, instance, created, **kwargs): \"\"\"Creates the schedules for other stuff.\"\"\" if created: #", "event in 2 days schedule = Schedule( actor=instance, verb=DUMMY_EVENT, limit=1, every=2, period=DAYS, start_time=instance.created", "Schedule from tests.app.events import DUMMY_EVENT, EVERY_HOUR from tests.app.models import OtherStuff @receiver(post_save, sender=OtherStuff) def" ]
[ "Noriter.UI.Layout import * from Noriter.UI import NoriterWindow as nWin class SideWindow (nWin.NoriterWindow, QtGui.QWidget):", "NoriterWindow as nWin class SideWindow (nWin.NoriterWindow, QtGui.QWidget): def __init__(self): super(SideWindow, self).__init__() self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) @LayoutGUI", "from PySide import QtCore,QtGui from Noriter.UI.Layout import * from Noriter.UI import NoriterWindow as", "import QtCore,QtGui from Noriter.UI.Layout import * from Noriter.UI import NoriterWindow as nWin class", "import * from Noriter.UI import NoriterWindow as nWin class SideWindow (nWin.NoriterWindow, QtGui.QWidget): def", "import NoriterWindow as nWin class SideWindow (nWin.NoriterWindow, QtGui.QWidget): def __init__(self): super(SideWindow, self).__init__() self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)", "PySide import QtCore,QtGui from Noriter.UI.Layout import * from Noriter.UI import NoriterWindow as nWin", "from Noriter.UI.Layout import * from Noriter.UI import NoriterWindow as nWin class SideWindow (nWin.NoriterWindow,", "as nWin class SideWindow (nWin.NoriterWindow, QtGui.QWidget): def __init__(self): super(SideWindow, self).__init__() self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) @LayoutGUI def", "* from Noriter.UI import NoriterWindow as nWin class SideWindow (nWin.NoriterWindow, QtGui.QWidget): def __init__(self):", "class SideWindow (nWin.NoriterWindow, QtGui.QWidget): def __init__(self): super(SideWindow, self).__init__() self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) @LayoutGUI def GUI(self): pass", "Noriter.UI import NoriterWindow as nWin class SideWindow (nWin.NoriterWindow, QtGui.QWidget): def __init__(self): super(SideWindow, self).__init__()", "<filename>Editor/Noriter/Noriter/UI/SideWindow.py from PySide import QtCore,QtGui from Noriter.UI.Layout import * from Noriter.UI import NoriterWindow", "nWin class SideWindow (nWin.NoriterWindow, QtGui.QWidget): def __init__(self): super(SideWindow, 
self).__init__() self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) @LayoutGUI def GUI(self):", "from Noriter.UI import NoriterWindow as nWin class SideWindow (nWin.NoriterWindow, QtGui.QWidget): def __init__(self): super(SideWindow,", "QtCore,QtGui from Noriter.UI.Layout import * from Noriter.UI import NoriterWindow as nWin class SideWindow" ]
[ "and x.state not in ('done', 'cancel')) if move: product_uom_factor = prod.product_uom_id._compute_quantity(prod.product_qty - prod.qty_produced,", "product_uom_factor = prod.product_uom_id._compute_quantity(prod.product_qty - prod.qty_produced, prod.bom_id.product_uom_id) qty1 = sub_product_line.product_qty qty1 *= product_uom_factor /", "Odoo. See LICENSE file for full copyright and licensing details. from odoo import", "qty1 *= product_uom_factor / prod.bom_id.product_qty modification[move[0]] = (qty1, move[0].product_uom_qty) move[0].write({'product_uom_qty': qty1}) else: move", "= super(ChangeProductionQty, self)._update_product_to_produce(prod, qty, old_qty) for sub_product_line in prod.bom_id.sub_products: move = prod.move_finished_ids.filtered(lambda x:", "from odoo import api, models class ChangeProductionQty(models.TransientModel): _inherit = 'change.production.qty' @api.model def _update_product_to_produce(self,", "'cancel')) if move: product_uom_factor = prod.product_uom_id._compute_quantity(prod.product_qty - prod.qty_produced, prod.bom_id.product_uom_id) qty1 = sub_product_line.product_qty qty1", "prod.qty_produced, prod.bom_id.product_uom_id) qty1 = sub_product_line.product_qty qty1 *= product_uom_factor / prod.bom_id.product_qty modification[move[0]] = (qty1,", "*= product_uom_factor / prod.bom_id.product_qty modification[move[0]] = (qty1, move[0].product_uom_qty) move[0].write({'product_uom_qty': qty1}) else: move =", "details. 
from odoo import api, models class ChangeProductionQty(models.TransientModel): _inherit = 'change.production.qty' @api.model def", "old_qty): modification = super(ChangeProductionQty, self)._update_product_to_produce(prod, qty, old_qty) for sub_product_line in prod.bom_id.sub_products: move =", "== sub_product_line and x.state not in ('done', 'cancel')) if move: product_uom_factor = prod.product_uom_id._compute_quantity(prod.product_qty", "= sub_product_line.product_qty qty1 *= product_uom_factor / prod.bom_id.product_qty modification[move[0]] = (qty1, move[0].product_uom_qty) move[0].write({'product_uom_qty': qty1})", "qty, old_qty): modification = super(ChangeProductionQty, self)._update_product_to_produce(prod, qty, old_qty) for sub_product_line in prod.bom_id.sub_products: move", "coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright", "api, models class ChangeProductionQty(models.TransientModel): _inherit = 'change.production.qty' @api.model def _update_product_to_produce(self, prod, qty, old_qty):", "in prod.bom_id.sub_products: move = prod.move_finished_ids.filtered(lambda x: x.subproduct_id == sub_product_line and x.state not in", "prod.bom_id.product_qty modification[move[0]] = (qty1, move[0].product_uom_qty) move[0].write({'product_uom_qty': qty1}) else: move = prod._create_byproduct_move(sub_product_line) modification[move] =", "= prod.move_finished_ids.filtered(lambda x: x.subproduct_id == sub_product_line and x.state not in ('done', 'cancel')) if", "models class ChangeProductionQty(models.TransientModel): _inherit = 'change.production.qty' @api.model def _update_product_to_produce(self, prod, qty, old_qty): modification", "import api, models class ChangeProductionQty(models.TransientModel): _inherit = 'change.production.qty' @api.model def _update_product_to_produce(self, prod, qty,", "-*- # Part of Odoo. 
See LICENSE file for full copyright and licensing", "prod.move_finished_ids.filtered(lambda x: x.subproduct_id == sub_product_line and x.state not in ('done', 'cancel')) if move:", "qty, old_qty) for sub_product_line in prod.bom_id.sub_products: move = prod.move_finished_ids.filtered(lambda x: x.subproduct_id == sub_product_line", "sub_product_line in prod.bom_id.sub_products: move = prod.move_finished_ids.filtered(lambda x: x.subproduct_id == sub_product_line and x.state not", "LICENSE file for full copyright and licensing details. from odoo import api, models", "copyright and licensing details. from odoo import api, models class ChangeProductionQty(models.TransientModel): _inherit =", "_update_product_to_produce(self, prod, qty, old_qty): modification = super(ChangeProductionQty, self)._update_product_to_produce(prod, qty, old_qty) for sub_product_line in", "= prod.product_uom_id._compute_quantity(prod.product_qty - prod.qty_produced, prod.bom_id.product_uom_id) qty1 = sub_product_line.product_qty qty1 *= product_uom_factor / prod.bom_id.product_qty", "for full copyright and licensing details. from odoo import api, models class ChangeProductionQty(models.TransientModel):", "sub_product_line and x.state not in ('done', 'cancel')) if move: product_uom_factor = prod.product_uom_id._compute_quantity(prod.product_qty -", "super(ChangeProductionQty, self)._update_product_to_produce(prod, qty, old_qty) for sub_product_line in prod.bom_id.sub_products: move = prod.move_finished_ids.filtered(lambda x: x.subproduct_id", "# Part of Odoo. 
See LICENSE file for full copyright and licensing details.", "@api.model def _update_product_to_produce(self, prod, qty, old_qty): modification = super(ChangeProductionQty, self)._update_product_to_produce(prod, qty, old_qty) for", "in ('done', 'cancel')) if move: product_uom_factor = prod.product_uom_id._compute_quantity(prod.product_qty - prod.qty_produced, prod.bom_id.product_uom_id) qty1 =", "_inherit = 'change.production.qty' @api.model def _update_product_to_produce(self, prod, qty, old_qty): modification = super(ChangeProductionQty, self)._update_product_to_produce(prod,", "(qty1, move[0].product_uom_qty) move[0].write({'product_uom_qty': qty1}) else: move = prod._create_byproduct_move(sub_product_line) modification[move] = (move.product_uom_qty, 0) return", "-*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full", "prod, qty, old_qty): modification = super(ChangeProductionQty, self)._update_product_to_produce(prod, qty, old_qty) for sub_product_line in prod.bom_id.sub_products:", "self)._update_product_to_produce(prod, qty, old_qty) for sub_product_line in prod.bom_id.sub_products: move = prod.move_finished_ids.filtered(lambda x: x.subproduct_id ==", "file for full copyright and licensing details. from odoo import api, models class", "licensing details. from odoo import api, models class ChangeProductionQty(models.TransientModel): _inherit = 'change.production.qty' @api.model", "full copyright and licensing details. 
from odoo import api, models class ChangeProductionQty(models.TransientModel): _inherit", "/ prod.bom_id.product_qty modification[move[0]] = (qty1, move[0].product_uom_qty) move[0].write({'product_uom_qty': qty1}) else: move = prod._create_byproduct_move(sub_product_line) modification[move]", "= 'change.production.qty' @api.model def _update_product_to_produce(self, prod, qty, old_qty): modification = super(ChangeProductionQty, self)._update_product_to_produce(prod, qty,", "prod.product_uom_id._compute_quantity(prod.product_qty - prod.qty_produced, prod.bom_id.product_uom_id) qty1 = sub_product_line.product_qty qty1 *= product_uom_factor / prod.bom_id.product_qty modification[move[0]]", "odoo import api, models class ChangeProductionQty(models.TransientModel): _inherit = 'change.production.qty' @api.model def _update_product_to_produce(self, prod,", "move[0].product_uom_qty) move[0].write({'product_uom_qty': qty1}) else: move = prod._create_byproduct_move(sub_product_line) modification[move] = (move.product_uom_qty, 0) return modification", "utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and", "Part of Odoo. See LICENSE file for full copyright and licensing details. from", "and licensing details. from odoo import api, models class ChangeProductionQty(models.TransientModel): _inherit = 'change.production.qty'", "# -*- coding: utf-8 -*- # Part of Odoo. 
See LICENSE file for", "move = prod.move_finished_ids.filtered(lambda x: x.subproduct_id == sub_product_line and x.state not in ('done', 'cancel'))", "= (qty1, move[0].product_uom_qty) move[0].write({'product_uom_qty': qty1}) else: move = prod._create_byproduct_move(sub_product_line) modification[move] = (move.product_uom_qty, 0)", "for sub_product_line in prod.bom_id.sub_products: move = prod.move_finished_ids.filtered(lambda x: x.subproduct_id == sub_product_line and x.state", "sub_product_line.product_qty qty1 *= product_uom_factor / prod.bom_id.product_qty modification[move[0]] = (qty1, move[0].product_uom_qty) move[0].write({'product_uom_qty': qty1}) else:", "x: x.subproduct_id == sub_product_line and x.state not in ('done', 'cancel')) if move: product_uom_factor", "qty1 = sub_product_line.product_qty qty1 *= product_uom_factor / prod.bom_id.product_qty modification[move[0]] = (qty1, move[0].product_uom_qty) move[0].write({'product_uom_qty':", "See LICENSE file for full copyright and licensing details. 
from odoo import api,", "prod.bom_id.product_uom_id) qty1 = sub_product_line.product_qty qty1 *= product_uom_factor / prod.bom_id.product_qty modification[move[0]] = (qty1, move[0].product_uom_qty)", "not in ('done', 'cancel')) if move: product_uom_factor = prod.product_uom_id._compute_quantity(prod.product_qty - prod.qty_produced, prod.bom_id.product_uom_id) qty1", "- prod.qty_produced, prod.bom_id.product_uom_id) qty1 = sub_product_line.product_qty qty1 *= product_uom_factor / prod.bom_id.product_qty modification[move[0]] =", "('done', 'cancel')) if move: product_uom_factor = prod.product_uom_id._compute_quantity(prod.product_qty - prod.qty_produced, prod.bom_id.product_uom_id) qty1 = sub_product_line.product_qty", "if move: product_uom_factor = prod.product_uom_id._compute_quantity(prod.product_qty - prod.qty_produced, prod.bom_id.product_uom_id) qty1 = sub_product_line.product_qty qty1 *=", "prod.bom_id.sub_products: move = prod.move_finished_ids.filtered(lambda x: x.subproduct_id == sub_product_line and x.state not in ('done',", "of Odoo. See LICENSE file for full copyright and licensing details. 
from odoo", "x.subproduct_id == sub_product_line and x.state not in ('done', 'cancel')) if move: product_uom_factor =", "product_uom_factor / prod.bom_id.product_qty modification[move[0]] = (qty1, move[0].product_uom_qty) move[0].write({'product_uom_qty': qty1}) else: move = prod._create_byproduct_move(sub_product_line)", "modification[move[0]] = (qty1, move[0].product_uom_qty) move[0].write({'product_uom_qty': qty1}) else: move = prod._create_byproduct_move(sub_product_line) modification[move] = (move.product_uom_qty,", "'change.production.qty' @api.model def _update_product_to_produce(self, prod, qty, old_qty): modification = super(ChangeProductionQty, self)._update_product_to_produce(prod, qty, old_qty)", "old_qty) for sub_product_line in prod.bom_id.sub_products: move = prod.move_finished_ids.filtered(lambda x: x.subproduct_id == sub_product_line and", "class ChangeProductionQty(models.TransientModel): _inherit = 'change.production.qty' @api.model def _update_product_to_produce(self, prod, qty, old_qty): modification =", "x.state not in ('done', 'cancel')) if move: product_uom_factor = prod.product_uom_id._compute_quantity(prod.product_qty - prod.qty_produced, prod.bom_id.product_uom_id)", "ChangeProductionQty(models.TransientModel): _inherit = 'change.production.qty' @api.model def _update_product_to_produce(self, prod, qty, old_qty): modification = super(ChangeProductionQty,", "def _update_product_to_produce(self, prod, qty, old_qty): modification = super(ChangeProductionQty, self)._update_product_to_produce(prod, qty, old_qty) for sub_product_line", "move: product_uom_factor = prod.product_uom_id._compute_quantity(prod.product_qty - prod.qty_produced, prod.bom_id.product_uom_id) qty1 = sub_product_line.product_qty qty1 *= product_uom_factor", "modification = super(ChangeProductionQty, self)._update_product_to_produce(prod, qty, old_qty) for sub_product_line in prod.bom_id.sub_products: move = prod.move_finished_ids.filtered(lambda" ]
[]
[ "return np.array(out) def seqs_to_seq_of_prefix(seqs): out = [] for seq in seqs: out.append(seq[0]) return", "= np.split(np_sample, starting_chromosome_idxs)[1:] chromosomes_dists = np.split(np_dists, starting_chromosome_idxs)[1:] out = [] for i in", "= [] noised_seqs = [] for i in range(n_seqs): seq = np.array(model.sample(seqs_len)) seqs.append(seq)", "starting_seqs_idxs)[1:] out.append(seqs) return out def seqs_to_seq(seqs): out = [] for seq in seqs:", "[] for i in range(n_seqs): seq = np.array(model.sample(seqs_len)) seqs.append(seq) noised_seq = seq.copy() hits", "threshold): np_sample = np.array(sample) np_dists = np.array(dists_sample) starting_chromosome_idxs = np.where(np_dists >= 1e100)[0] chromosomes", "sample_numbers is None: sample_numbers = range(len(samples)) for i in sample_numbers: n = samples[i]", "1e100)[0] chromosomes = np.split(np_sample, starting_chromosome_idxs)[1:] chromosomes_dists = np.split(np_dists, starting_chromosome_idxs)[1:] out = [] for", "[] seqs_names = [] json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence']", "''' Returns logger object ''' logger = logging.getLogger(__name__) logger.setLevel(verbosity) return logger ################################################################################ #", "out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs(samples_to_seq[n], samples_dists[n])) return zip(out_names, out_seqs) def get_full_sequences(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples =", "= np.random.permutation(samples) for i in range(n_seqs): seqs.append(samples_to_seq[samples[i]]) seqs_names.append(samples[i]) return seqs, seqs_names def to_json(file_name,", "for i in range(len(samples)): count_mat[i, samples_objects[i]] = samples_counts[i] return count_mat def get_samples_names(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file", "seqs, seqs_names 
def to_json(file_name, dict_to_save): with open(file_name + '.json', 'w') as fp: json.dump(dict_to_save,", "np.where(np.array(dists_sample) >= 1e100)[0] return np.split(np_sample, starting_chromosome_idxs)[1:] def load_json(file_name): return json.load(open(file_name)) def get_split_sequences(file_name, sample_numbers=None):", "i in range(len(chromosomes)): chromosome = chromosomes[i] chromosome_dists = chromosomes_dists[i] starting_seqs_idxs = np.where(chromosome_dists >=", "json_file = json.load(open(file_name)) samples = json_file[u'samples'] return samples def get_split_sequences_by_threshold(file_name, threshold, sample_numbers=None): json_file", "None: sample_numbers = range(len(samples)) for i in sample_numbers: n = samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs(samples_to_seq[n],", "= np.where(np.array(dists_sample) >= 1e100)[0] return np.split(np_sample, starting_chromosome_idxs)[1:] def load_json(file_name): return json.load(open(file_name)) def get_split_sequences(file_name,", "noise_change_dist = DiscreteDistribution(dict(zip(range(96), [1.0 / 96] * 96))) seqs = [] noised_seqs =", "np_dists = np.array(dists_sample) starting_chromosome_idxs = np.where(np_dists >= 1e100)[0] chromosomes = np.split(np_sample, starting_chromosome_idxs)[1:] chromosomes_dists", "def get_full_sequences(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] out_seqs =", "np from pomegranate import * import json ################################################################################ # LOGGING ################################################################################ import logging", "== 0: noised_seq[j] = noise_change_dist.sample() noised_seqs.append(noised_seq) return seqs, noised_seqs def get_emissions(file='data\\emissions_for_breast_cancer'): return np.load(file", "seq = 
np.array(model.sample(seqs_len)) seqs.append(seq) noised_seq = seq.copy() hits = noise_dist.sample(seqs_len) for j, hit", "= '%(asctime)s SigMa %(levelname)-10s: %(message)s' logging.basicConfig(format=FORMAT) def get_logger(verbosity=logging.INFO): ''' Returns logger object '''", "= json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] out_seqs = [] out_names =", "noised_seqs.append(noised_seq) return seqs, noised_seqs def get_emissions(file='data\\emissions_for_breast_cancer'): return np.load(file + '.npy') def sample_uniform_between_a_b(n_states, a=0.0,", "in range(n_seqs): seq = np.array(model.sample(seqs_len)) seqs.append(seq) noised_seq = seq.copy() hits = noise_dist.sample(seqs_len) for", "seqs = [] noised_seqs = [] for i in range(n_seqs): seq = np.array(model.sample(seqs_len))", "seqs.append(seq) noised_seq = seq.copy() hits = noise_dist.sample(seqs_len) for j, hit in enumerate(hits): if", "with open(file_name + '.json', 'w') as fp: json.dump(dict_to_save, fp) def full_sample_to_chromosomes_seqs(sample, dists_sample): np_sample", "= [] seqs_names = [] json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq =", "def random_seqs_from_json(file_name, n_seqs=10): seqs = [] seqs_names = [] json_file = json.load(open(file_name)) samples", "get_samples_names(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples'] return samples def get_split_sequences_by_threshold(file_name, threshold, sample_numbers=None):", "threshold)) return zip(out_names, out_seqs) def full_sample_to_chromosomes_seqs_by_threshold(sample, dists_sample, threshold): np_sample = np.array(sample) np_dists =", "objects, counts = np.unique(samples_to_seq[sample], return_counts=True) samples_objects.append(objects) samples_counts.append(counts) num_objects = max(num_objects, np.max(objects)) num_objects +=", "sample_numbers = 
range(len(samples)) for i in sample_numbers: n = samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs(samples_to_seq[n], samples_dists[n]))", "samples def get_split_sequences_by_threshold(file_name, threshold, sample_numbers=None): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq =", "n in samples: out_names.append(n) out_seqs.append(samples_to_seq[n]) return zip(out_names, out_seqs) def get_count_sequences_as_mat(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name))", "logging.basicConfig(format=FORMAT) def get_logger(verbosity=logging.INFO): ''' Returns logger object ''' logger = logging.getLogger(__name__) logger.setLevel(verbosity) return", "return seqs, noised_seqs def get_emissions(file='data\\emissions_for_breast_cancer'): return np.load(file + '.npy') def sample_uniform_between_a_b(n_states, a=0.0, b=1.0):", "[] for i in range(len(samples)): if samples[i] not in samples_in_dir: missing_indices.append(i) return missing_indices", "samples_to_seq = json_file[u'sampleToSequence'] # finding num_object + counting num_objects = 0 samples_objects =", "return_counts=True) samples_objects.append(objects) samples_counts.append(counts) num_objects = max(num_objects, np.max(objects)) num_objects += 1 count_mat = np.zeros((len(samples),", "logging # Logging format FORMAT = '%(asctime)s SigMa %(levelname)-10s: %(message)s' logging.basicConfig(format=FORMAT) def get_logger(verbosity=logging.INFO):", "import numpy as np from pomegranate import * import json ################################################################################ # LOGGING", "# LOGGING ################################################################################ import logging # Logging format FORMAT = '%(asctime)s SigMa %(levelname)-10s:", "get_split_sequences_by_threshold(file_name, threshold, sample_numbers=None): json_file = json.load(open(file_name)) samples = 
json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples_dists", "noised_seq[j] = noise_change_dist.sample() noised_seqs.append(noised_seq) return seqs, noised_seqs def get_emissions(file='data\\emissions_for_breast_cancer'): return np.load(file + '.npy')", "return np.load(file + '.npy') def sample_uniform_between_a_b(n_states, a=0.0, b=1.0): return (b - a) *", "'.json', 'w') as fp: json.dump(dict_to_save, fp) def full_sample_to_chromosomes_seqs(sample, dists_sample): np_sample = np.array(sample) starting_chromosome_idxs", "np.split(np_sample, starting_chromosome_idxs)[1:] def load_json(file_name): return json.load(open(file_name)) def get_split_sequences(file_name, sample_numbers=None): json_file = json.load(open(file_name)) samples", "sample_numbers: n = samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs_by_threshold(samples_to_seq[n], samples_dists[n], threshold)) return zip(out_names, out_seqs) def full_sample_to_chromosomes_seqs_by_threshold(sample,", "out_names.append(n) out_seqs.append(samples_to_seq[n]) return zip(out_names, out_seqs) def get_count_sequences_as_mat(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples']", "import json ################################################################################ # LOGGING ################################################################################ import logging # Logging format FORMAT =", "np.where(np_dists >= 1e100)[0] chromosomes = np.split(np_sample, starting_chromosome_idxs)[1:] chromosomes_dists = np.split(np_dists, starting_chromosome_idxs)[1:] out =", "seqs, noised_seqs def get_emissions(file='data\\emissions_for_breast_cancer'): return np.load(file + '.npy') def sample_uniform_between_a_b(n_states, a=0.0, b=1.0): return", "samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] # finding num_object + counting num_objects =", 
"starting_chromosome_idxs)[1:] chromosomes_dists = np.split(np_dists, starting_chromosome_idxs)[1:] out = [] for i in range(len(chromosomes)): chromosome", "json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples = np.random.permutation(samples) for", "range(len(samples)) for i in sample_numbers: n = samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs(samples_to_seq[n], samples_dists[n])) return zip(out_names,", "= json_file[u'sampleToSequence'] samples = np.random.permutation(samples) for i in range(n_seqs): seqs.append(samples_to_seq[samples[i]]) seqs_names.append(samples[i]) return seqs,", "sample_numbers=None): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples_dists = json_file[u'sampleToPrevMutDists']", "seqs_names = [] json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples", "np_sample = np.array(sample) starting_chromosome_idxs = np.where(np.array(dists_sample) >= 1e100)[0] return np.split(np_sample, starting_chromosome_idxs)[1:] def load_json(file_name):", "n = samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs(samples_to_seq[n], samples_dists[n])) return zip(out_names, out_seqs) def get_full_sequences(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file =", "samples_counts[i] return count_mat def get_samples_names(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples'] return samples", "out = [] for i in range(len(chromosomes)): chromosome = chromosomes[i] chromosome_dists = chromosomes_dists[i]", "json_file[u'sampleToSequence'] out_seqs = [] out_names = [] for n in samples: out_names.append(n) out_seqs.append(samples_to_seq[n])", "out = [] for seq in seqs: out.append(seq[0]) return 
np.array(out) def sample_indices_not_in_dir(dir_path): import", "load_json(file_name): return json.load(open(file_name)) def get_split_sequences(file_name, sample_numbers=None): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq", "full_sample_to_chromosomes_seqs(sample, dists_sample): np_sample = np.array(sample) starting_chromosome_idxs = np.where(np.array(dists_sample) >= 1e100)[0] return np.split(np_sample, starting_chromosome_idxs)[1:]", "= json_file[u'sampleToPrevMutDists'] out_seqs = [] out_names = [] if sample_numbers is None: sample_numbers", "starting_seqs_idxs = np.where(chromosome_dists >= threshold)[0] seqs = np.split(chromosome, starting_seqs_idxs)[1:] out.append(seqs) return out def", "= noise_dist.sample(seqs_len) for j, hit in enumerate(hits): if hit == 0: noised_seq[j] =", "range(n_seqs): seq = np.array(model.sample(seqs_len)) seqs.append(seq) noised_seq = seq.copy() hits = noise_dist.sample(seqs_len) for j,", "i in sample_numbers: n = samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs_by_threshold(samples_to_seq[n], samples_dists[n], threshold)) return zip(out_names, out_seqs)", "= [] for i in range(len(chromosomes)): chromosome = chromosomes[i] chromosome_dists = chromosomes_dists[i] starting_seqs_idxs", ">= 1e100)[0] return np.split(np_sample, starting_chromosome_idxs)[1:] def load_json(file_name): return json.load(open(file_name)) def get_split_sequences(file_name, sample_numbers=None): json_file", "= json_file[u'sampleToSequence'] # finding num_object + counting num_objects = 0 samples_objects = []", "for i in range(n_seqs): seqs.append(samples_to_seq[samples[i]]) seqs_names.append(samples[i]) return seqs, seqs_names def to_json(file_name, dict_to_save): with", "return json.load(open(file_name)) def get_split_sequences(file_name, sample_numbers=None): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq =", "get_logger(verbosity=logging.INFO): ''' 
Returns logger object ''' logger = logging.getLogger(__name__) logger.setLevel(verbosity) return logger ################################################################################", "def get_split_sequences_by_threshold(file_name, threshold, sample_numbers=None): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence']", "[] for seq in seqs: out.append(seq[0]) return np.array(out) def sample_indices_not_in_dir(dir_path): import os samples_in_dir", "0: noised_seq[j] = noise_change_dist.sample() noised_seqs.append(noised_seq) return seqs, noised_seqs def get_emissions(file='data\\emissions_for_breast_cancer'): return np.load(file +", "out_seqs.append(full_sample_to_chromosomes_seqs(samples_to_seq[n], samples_dists[n])) return zip(out_names, out_seqs) def get_full_sequences(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples']", "np.array(sample) np_dists = np.array(dists_sample) starting_chromosome_idxs = np.where(np_dists >= 1e100)[0] chromosomes = np.split(np_sample, starting_chromosome_idxs)[1:]", "chromosomes_dists = np.split(np_dists, starting_chromosome_idxs)[1:] out = [] for i in range(len(chromosomes)): chromosome =", "starting_chromosome_idxs)[1:] def load_json(file_name): return json.load(open(file_name)) def get_split_sequences(file_name, sample_numbers=None): json_file = json.load(open(file_name)) samples =", "count_mat def get_samples_names(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples'] return samples def get_split_sequences_by_threshold(file_name,", "np_sample = np.array(sample) np_dists = np.array(dists_sample) starting_chromosome_idxs = np.where(np_dists >= 1e100)[0] chromosomes =", "out_seqs = [] out_names = [] if sample_numbers is None: sample_numbers = range(len(samples))", "for j, hit in enumerate(hits): if hit 
== 0: noised_seq[j] = noise_change_dist.sample() noised_seqs.append(noised_seq)", "################################################################################ import logging # Logging format FORMAT = '%(asctime)s SigMa %(levelname)-10s: %(message)s' logging.basicConfig(format=FORMAT)", "[] out_names = [] if sample_numbers is None: sample_numbers = range(len(samples)) for i", "1 count_mat = np.zeros((len(samples), num_objects)) for i in range(len(samples)): count_mat[i, samples_objects[i]] = samples_counts[i]", "%(levelname)-10s: %(message)s' logging.basicConfig(format=FORMAT) def get_logger(verbosity=logging.INFO): ''' Returns logger object ''' logger = logging.getLogger(__name__)", "samples_dists = json_file[u'sampleToPrevMutDists'] out_seqs = [] out_names = [] if sample_numbers is None:", "samples_in_dir = [f[:-5] for f in os.listdir(dir_path)] samples = get_samples_names() missing_indices = []", "UTILS ################################################################################ def sample_and_noise(model, noise_dist, n_seqs, seqs_len): noise_change_dist = DiscreteDistribution(dict(zip(range(96), [1.0 / 96]", "hits = noise_dist.sample(seqs_len) for j, hit in enumerate(hits): if hit == 0: noised_seq[j]", "noised_seqs = [] for i in range(n_seqs): seq = np.array(model.sample(seqs_len)) seqs.append(seq) noised_seq =", "[] if sample_numbers is None: sample_numbers = range(len(samples)) for i in sample_numbers: n", "is None: sample_numbers = range(len(samples)) for i in sample_numbers: n = samples[i] out_names.append(n)", "missing_indices = [] for i in range(len(samples)): if samples[i] not in samples_in_dir: missing_indices.append(i)", "= json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples = np.random.permutation(samples) for i", "zip(out_names, out_seqs) def get_count_sequences_as_mat(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples 
= json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence']", "[] for sample in samples: objects, counts = np.unique(samples_to_seq[sample], return_counts=True) samples_objects.append(objects) samples_counts.append(counts) num_objects", "samples_objects = [] samples_counts = [] for sample in samples: objects, counts =", "num_objects = 0 samples_objects = [] samples_counts = [] for sample in samples:", "fp: json.dump(dict_to_save, fp) def full_sample_to_chromosomes_seqs(sample, dists_sample): np_sample = np.array(sample) starting_chromosome_idxs = np.where(np.array(dists_sample) >=", "for i in sample_numbers: n = samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs(samples_to_seq[n], samples_dists[n])) return zip(out_names, out_seqs)", "[] samples_counts = [] for sample in samples: objects, counts = np.unique(samples_to_seq[sample], return_counts=True)", "for i in range(n_seqs): seq = np.array(model.sample(seqs_len)) seqs.append(seq) noised_seq = seq.copy() hits =", "# Logging format FORMAT = '%(asctime)s SigMa %(levelname)-10s: %(message)s' logging.basicConfig(format=FORMAT) def get_logger(verbosity=logging.INFO): '''", "np.array(sample) starting_chromosome_idxs = np.where(np.array(dists_sample) >= 1e100)[0] return np.split(np_sample, starting_chromosome_idxs)[1:] def load_json(file_name): return json.load(open(file_name))", "i in range(n_seqs): seqs.append(samples_to_seq[samples[i]]) seqs_names.append(samples[i]) return seqs, seqs_names def to_json(file_name, dict_to_save): with open(file_name", "in range(len(chromosomes)): chromosome = chromosomes[i] chromosome_dists = chromosomes_dists[i] starting_seqs_idxs = np.where(chromosome_dists >= threshold)[0]", "np.random.permutation(samples) for i in range(n_seqs): seqs.append(samples_to_seq[samples[i]]) seqs_names.append(samples[i]) return seqs, seqs_names def to_json(file_name, dict_to_save):", "def 
get_count_sequences_as_mat(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] # finding", "num_objects)) for i in range(len(samples)): count_mat[i, samples_objects[i]] = samples_counts[i] return count_mat def get_samples_names(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'):", "def sample_uniform_between_a_b(n_states, a=0.0, b=1.0): return (b - a) * np.random.sample(n_states) + a def", "b=1.0): return (b - a) * np.random.sample(n_states) + a def random_seqs_from_json(file_name, n_seqs=10): seqs", "return seqs, seqs_names def to_json(file_name, dict_to_save): with open(file_name + '.json', 'w') as fp:", "open(file_name + '.json', 'w') as fp: json.dump(dict_to_save, fp) def full_sample_to_chromosomes_seqs(sample, dists_sample): np_sample =", "samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples = np.random.permutation(samples) for i in range(n_seqs):", "def seqs_to_seq(seqs): out = [] for seq in seqs: out.extend(seq) return np.array(out) def", "out_seqs.append(full_sample_to_chromosomes_seqs_by_threshold(samples_to_seq[n], samples_dists[n], threshold)) return zip(out_names, out_seqs) def full_sample_to_chromosomes_seqs_by_threshold(sample, dists_sample, threshold): np_sample = np.array(sample)", "################################################################################ # UTILS ################################################################################ def sample_and_noise(model, noise_dist, n_seqs, seqs_len): noise_change_dist = DiscreteDistribution(dict(zip(range(96), [1.0", "return samples def get_split_sequences_by_threshold(file_name, threshold, sample_numbers=None): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq", "np.split(np_dists, starting_chromosome_idxs)[1:] out = [] for i in range(len(chromosomes)): chromosome = 
chromosomes[i] chromosome_dists", "= np.unique(samples_to_seq[sample], return_counts=True) samples_objects.append(objects) samples_counts.append(counts) num_objects = max(num_objects, np.max(objects)) num_objects += 1 count_mat", "sample_numbers: n = samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs(samples_to_seq[n], samples_dists[n])) return zip(out_names, out_seqs) def get_full_sequences(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file", "counting num_objects = 0 samples_objects = [] samples_counts = [] for sample in", "for i in sample_numbers: n = samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs_by_threshold(samples_to_seq[n], samples_dists[n], threshold)) return zip(out_names,", "np.where(chromosome_dists >= threshold)[0] seqs = np.split(chromosome, starting_seqs_idxs)[1:] out.append(seqs) return out def seqs_to_seq(seqs): out", "= [] for i in range(len(samples)): if samples[i] not in samples_in_dir: missing_indices.append(i) return", "noise_dist.sample(seqs_len) for j, hit in enumerate(hits): if hit == 0: noised_seq[j] = noise_change_dist.sample()", "= json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples_dists = json_file[u'sampleToPrevMutDists'] out_seqs = [] out_names =", "starting_chromosome_idxs)[1:] out = [] for i in range(len(chromosomes)): chromosome = chromosomes[i] chromosome_dists =", "sample in samples: objects, counts = np.unique(samples_to_seq[sample], return_counts=True) samples_objects.append(objects) samples_counts.append(counts) num_objects = max(num_objects,", "= [] for seq in seqs: out.extend(seq) return np.array(out) def seqs_to_seq_of_prefix(seqs): out =", "logger = logging.getLogger(__name__) logger.setLevel(verbosity) return logger ################################################################################ # UTILS ################################################################################ def sample_and_noise(model, 
noise_dist,", "range(n_seqs): seqs.append(samples_to_seq[samples[i]]) seqs_names.append(samples[i]) return seqs, seqs_names def to_json(file_name, dict_to_save): with open(file_name + '.json',", "count_mat = np.zeros((len(samples), num_objects)) for i in range(len(samples)): count_mat[i, samples_objects[i]] = samples_counts[i] return", "chromosomes = np.split(np_sample, starting_chromosome_idxs)[1:] chromosomes_dists = np.split(np_dists, starting_chromosome_idxs)[1:] out = [] for i", "= json.load(open(file_name)) samples = json_file[u'samples'] return samples def get_split_sequences_by_threshold(file_name, threshold, sample_numbers=None): json_file =", "chromosome = chromosomes[i] chromosome_dists = chromosomes_dists[i] starting_seqs_idxs = np.where(chromosome_dists >= threshold)[0] seqs =", "out = [] for seq in seqs: out.extend(seq) return np.array(out) def seqs_to_seq_of_prefix(seqs): out", "return out def seqs_to_seq(seqs): out = [] for seq in seqs: out.extend(seq) return", "[] for seq in seqs: out.extend(seq) return np.array(out) def seqs_to_seq_of_prefix(seqs): out = []", "[] json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples = np.random.permutation(samples)", "[] for n in samples: out_names.append(n) out_seqs.append(samples_to_seq[n]) return zip(out_names, out_seqs) def get_count_sequences_as_mat(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file", "= chromosomes[i] chromosome_dists = chromosomes_dists[i] starting_seqs_idxs = np.where(chromosome_dists >= threshold)[0] seqs = np.split(chromosome,", "json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples_dists = json_file[u'sampleToPrevMutDists'] out_seqs = [] out_names = []", "numpy as np from pomegranate import * import json ################################################################################ # LOGGING 
################################################################################", "samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs(samples_to_seq[n], samples_dists[n])) return zip(out_names, out_seqs) def get_full_sequences(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples", "= samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs_by_threshold(samples_to_seq[n], samples_dists[n], threshold)) return zip(out_names, out_seqs) def full_sample_to_chromosomes_seqs_by_threshold(sample, dists_sample, threshold):", "np.array(model.sample(seqs_len)) seqs.append(seq) noised_seq = seq.copy() hits = noise_dist.sample(seqs_len) for j, hit in enumerate(hits):", "[f[:-5] for f in os.listdir(dir_path)] samples = get_samples_names() missing_indices = [] for i", "json_file[u'sampleToSequence'] samples = np.random.permutation(samples) for i in range(n_seqs): seqs.append(samples_to_seq[samples[i]]) seqs_names.append(samples[i]) return seqs, seqs_names", "= get_samples_names() missing_indices = [] for i in range(len(samples)): if samples[i] not in", "os.listdir(dir_path)] samples = get_samples_names() missing_indices = [] for i in range(len(samples)): if samples[i]", "dists_sample): np_sample = np.array(sample) starting_chromosome_idxs = np.where(np.array(dists_sample) >= 1e100)[0] return np.split(np_sample, starting_chromosome_idxs)[1:] def", "full_sample_to_chromosomes_seqs_by_threshold(sample, dists_sample, threshold): np_sample = np.array(sample) np_dists = np.array(dists_sample) starting_chromosome_idxs = np.where(np_dists >=", "out.extend(seq) return np.array(out) def seqs_to_seq_of_prefix(seqs): out = [] for seq in seqs: out.append(seq[0])", "= json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] # finding num_object + counting num_objects = 0", "sample_numbers = range(len(samples)) for i in sample_numbers: n = samples[i] out_names.append(n) 
out_seqs.append(full_sample_to_chromosomes_seqs_by_threshold(samples_to_seq[n], samples_dists[n],", "logger object ''' logger = logging.getLogger(__name__) logger.setLevel(verbosity) return logger ################################################################################ # UTILS ################################################################################", "chromosome_dists = chromosomes_dists[i] starting_seqs_idxs = np.where(chromosome_dists >= threshold)[0] seqs = np.split(chromosome, starting_seqs_idxs)[1:] out.append(seqs)", "return np.array(out) def sample_indices_not_in_dir(dir_path): import os samples_in_dir = [f[:-5] for f in os.listdir(dir_path)]", "json.load(open(file_name)) def get_split_sequences(file_name, sample_numbers=None): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence']", "samples = np.random.permutation(samples) for i in range(n_seqs): seqs.append(samples_to_seq[samples[i]]) seqs_names.append(samples[i]) return seqs, seqs_names def", "num_objects += 1 count_mat = np.zeros((len(samples), num_objects)) for i in range(len(samples)): count_mat[i, samples_objects[i]]", "json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples = np.random.permutation(samples) for i in", "= [f[:-5] for f in os.listdir(dir_path)] samples = get_samples_names() missing_indices = [] for", "samples = json_file[u'samples'] return samples def get_split_sequences_by_threshold(file_name, threshold, sample_numbers=None): json_file = json.load(open(file_name)) samples", "for i in range(len(chromosomes)): chromosome = chromosomes[i] chromosome_dists = chromosomes_dists[i] starting_seqs_idxs = np.where(chromosome_dists", "for seq in seqs: out.append(seq[0]) return np.array(out) def sample_indices_not_in_dir(dir_path): import os samples_in_dir =", "json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples = 
np.random.permutation(samples) for i in range(n_seqs): seqs.append(samples_to_seq[samples[i]]) seqs_names.append(samples[i])", "= [] out_names = [] if sample_numbers is None: sample_numbers = range(len(samples)) for", "seqs: out.append(seq[0]) return np.array(out) def sample_indices_not_in_dir(dir_path): import os samples_in_dir = [f[:-5] for f", "seqs_len): noise_change_dist = DiscreteDistribution(dict(zip(range(96), [1.0 / 96] * 96))) seqs = [] noised_seqs", "enumerate(hits): if hit == 0: noised_seq[j] = noise_change_dist.sample() noised_seqs.append(noised_seq) return seqs, noised_seqs def", "seqs_to_seq_of_prefix(seqs): out = [] for seq in seqs: out.append(seq[0]) return np.array(out) def sample_indices_not_in_dir(dir_path):", "= np.array(model.sample(seqs_len)) seqs.append(seq) noised_seq = seq.copy() hits = noise_dist.sample(seqs_len) for j, hit in", "def sample_indices_not_in_dir(dir_path): import os samples_in_dir = [f[:-5] for f in os.listdir(dir_path)] samples =", "for seq in seqs: out.extend(seq) return np.array(out) def seqs_to_seq_of_prefix(seqs): out = [] for", "json.dump(dict_to_save, fp) def full_sample_to_chromosomes_seqs(sample, dists_sample): np_sample = np.array(sample) starting_chromosome_idxs = np.where(np.array(dists_sample) >= 1e100)[0]", "FORMAT = '%(asctime)s SigMa %(levelname)-10s: %(message)s' logging.basicConfig(format=FORMAT) def get_logger(verbosity=logging.INFO): ''' Returns logger object", ">= threshold)[0] seqs = np.split(chromosome, starting_seqs_idxs)[1:] out.append(seqs) return out def seqs_to_seq(seqs): out =", "= np.zeros((len(samples), num_objects)) for i in range(len(samples)): count_mat[i, samples_objects[i]] = samples_counts[i] return count_mat", "zip(out_names, out_seqs) def get_full_sequences(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence']", "* np.random.sample(n_states) + a def 
random_seqs_from_json(file_name, n_seqs=10): seqs = [] seqs_names = []", "seqs_to_seq(seqs): out = [] for seq in seqs: out.extend(seq) return np.array(out) def seqs_to_seq_of_prefix(seqs):", "= [] if sample_numbers is None: sample_numbers = range(len(samples)) for i in sample_numbers:", "SigMa %(levelname)-10s: %(message)s' logging.basicConfig(format=FORMAT) def get_logger(verbosity=logging.INFO): ''' Returns logger object ''' logger =", "np.zeros((len(samples), num_objects)) for i in range(len(samples)): count_mat[i, samples_objects[i]] = samples_counts[i] return count_mat def", "# finding num_object + counting num_objects = 0 samples_objects = [] samples_counts =", "def full_sample_to_chromosomes_seqs_by_threshold(sample, dists_sample, threshold): np_sample = np.array(sample) np_dists = np.array(dists_sample) starting_chromosome_idxs = np.where(np_dists", "counts = np.unique(samples_to_seq[sample], return_counts=True) samples_objects.append(objects) samples_counts.append(counts) num_objects = max(num_objects, np.max(objects)) num_objects += 1", "import os samples_in_dir = [f[:-5] for f in os.listdir(dir_path)] samples = get_samples_names() missing_indices", "= json_file[u'sampleToSequence'] samples_dists = json_file[u'sampleToPrevMutDists'] out_seqs = [] out_names = [] if sample_numbers", "dists_sample, threshold): np_sample = np.array(sample) np_dists = np.array(dists_sample) starting_chromosome_idxs = np.where(np_dists >= 1e100)[0]", "- a) * np.random.sample(n_states) + a def random_seqs_from_json(file_name, n_seqs=10): seqs = [] seqs_names", "samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples_dists = json_file[u'sampleToPrevMutDists'] out_seqs = [] out_names", "pomegranate import * import json ################################################################################ # LOGGING ################################################################################ import logging # Logging", "in samples: objects, counts = 
np.unique(samples_to_seq[sample], return_counts=True) samples_objects.append(objects) samples_counts.append(counts) num_objects = max(num_objects, np.max(objects))", "samples_objects.append(objects) samples_counts.append(counts) num_objects = max(num_objects, np.max(objects)) num_objects += 1 count_mat = np.zeros((len(samples), num_objects))", "json ################################################################################ # LOGGING ################################################################################ import logging # Logging format FORMAT = '%(asctime)s", "n_seqs, seqs_len): noise_change_dist = DiscreteDistribution(dict(zip(range(96), [1.0 / 96] * 96))) seqs = []", "out_names = [] if sample_numbers is None: sample_numbers = range(len(samples)) for i in", "''' logger = logging.getLogger(__name__) logger.setLevel(verbosity) return logger ################################################################################ # UTILS ################################################################################ def sample_and_noise(model,", "return (b - a) * np.random.sample(n_states) + a def random_seqs_from_json(file_name, n_seqs=10): seqs =", "noise_dist, n_seqs, seqs_len): noise_change_dist = DiscreteDistribution(dict(zip(range(96), [1.0 / 96] * 96))) seqs =", "= [] samples_counts = [] for sample in samples: objects, counts = np.unique(samples_to_seq[sample],", "count_mat[i, samples_objects[i]] = samples_counts[i] return count_mat def get_samples_names(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples =", "format FORMAT = '%(asctime)s SigMa %(levelname)-10s: %(message)s' logging.basicConfig(format=FORMAT) def get_logger(verbosity=logging.INFO): ''' Returns logger", "logger.setLevel(verbosity) return logger ################################################################################ # UTILS ################################################################################ def 
sample_and_noise(model, noise_dist, n_seqs, seqs_len): noise_change_dist", "samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] out_seqs = [] out_names = [] for", "chromosomes_dists[i] starting_seqs_idxs = np.where(chromosome_dists >= threshold)[0] seqs = np.split(chromosome, starting_seqs_idxs)[1:] out.append(seqs) return out", "= json_file[u'samples'] return samples def get_split_sequences_by_threshold(file_name, threshold, sample_numbers=None): json_file = json.load(open(file_name)) samples =", "seqs.append(samples_to_seq[samples[i]]) seqs_names.append(samples[i]) return seqs, seqs_names def to_json(file_name, dict_to_save): with open(file_name + '.json', 'w')", "np.array(out) def sample_indices_not_in_dir(dir_path): import os samples_in_dir = [f[:-5] for f in os.listdir(dir_path)] samples", "np.array(out) def seqs_to_seq_of_prefix(seqs): out = [] for seq in seqs: out.append(seq[0]) return np.array(out)", "json_file[u'sampleToSequence'] # finding num_object + counting num_objects = 0 samples_objects = [] samples_counts", "= json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples = np.random.permutation(samples) for i in range(n_seqs): seqs.append(samples_to_seq[samples[i]])", "def get_emissions(file='data\\emissions_for_breast_cancer'): return np.load(file + '.npy') def sample_uniform_between_a_b(n_states, a=0.0, b=1.0): return (b -", "np.random.sample(n_states) + a def random_seqs_from_json(file_name, n_seqs=10): seqs = [] seqs_names = [] json_file", "seqs = [] seqs_names = [] json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq", "out_seqs) def get_full_sequences(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] out_seqs", "= [] out_names = [] for n in samples: out_names.append(n) out_seqs.append(samples_to_seq[n]) return zip(out_names,", "in 
sample_numbers: n = samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs(samples_to_seq[n], samples_dists[n])) return zip(out_names, out_seqs) def get_full_sequences(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'):", "logging.getLogger(__name__) logger.setLevel(verbosity) return logger ################################################################################ # UTILS ################################################################################ def sample_and_noise(model, noise_dist, n_seqs, seqs_len):", "= DiscreteDistribution(dict(zip(range(96), [1.0 / 96] * 96))) seqs = [] noised_seqs = []", "if hit == 0: noised_seq[j] = noise_change_dist.sample() noised_seqs.append(noised_seq) return seqs, noised_seqs def get_emissions(file='data\\emissions_for_breast_cancer'):", "samples_to_seq = json_file[u'sampleToSequence'] out_seqs = [] out_names = [] for n in samples:", "out.append(seq[0]) return np.array(out) def sample_indices_not_in_dir(dir_path): import os samples_in_dir = [f[:-5] for f in", "os samples_in_dir = [f[:-5] for f in os.listdir(dir_path)] samples = get_samples_names() missing_indices =", "def load_json(file_name): return json.load(open(file_name)) def get_split_sequences(file_name, sample_numbers=None): json_file = json.load(open(file_name)) samples = json_file[u'samples']", "Returns logger object ''' logger = logging.getLogger(__name__) logger.setLevel(verbosity) return logger ################################################################################ # UTILS", "out_names = [] for n in samples: out_names.append(n) out_seqs.append(samples_to_seq[n]) return zip(out_names, out_seqs) def", "as fp: json.dump(dict_to_save, fp) def full_sample_to_chromosomes_seqs(sample, dists_sample): np_sample = np.array(sample) starting_chromosome_idxs = np.where(np.array(dists_sample)", "np.split(np_sample, starting_chromosome_idxs)[1:] chromosomes_dists = np.split(np_dists, starting_chromosome_idxs)[1:] out = [] for i 
in range(len(chromosomes)):", "json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] # finding num_object + counting num_objects", "samples_dists[n])) return zip(out_names, out_seqs) def get_full_sequences(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq", "in range(len(samples)): count_mat[i, samples_objects[i]] = samples_counts[i] return count_mat def get_samples_names(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name))", "for f in os.listdir(dir_path)] samples = get_samples_names() missing_indices = [] for i in", "num_object + counting num_objects = 0 samples_objects = [] samples_counts = [] for", "noised_seqs def get_emissions(file='data\\emissions_for_breast_cancer'): return np.load(file + '.npy') def sample_uniform_between_a_b(n_states, a=0.0, b=1.0): return (b", "= samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs(samples_to_seq[n], samples_dists[n])) return zip(out_names, out_seqs) def get_full_sequences(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name))", "= seq.copy() hits = noise_dist.sample(seqs_len) for j, hit in enumerate(hits): if hit ==", "for sample in samples: objects, counts = np.unique(samples_to_seq[sample], return_counts=True) samples_objects.append(objects) samples_counts.append(counts) num_objects =", "out.append(seqs) return out def seqs_to_seq(seqs): out = [] for seq in seqs: out.extend(seq)", "= max(num_objects, np.max(objects)) num_objects += 1 count_mat = np.zeros((len(samples), num_objects)) for i in", "return logger ################################################################################ # UTILS ################################################################################ def sample_and_noise(model, noise_dist, n_seqs, seqs_len): 
noise_change_dist =", "starting_chromosome_idxs = np.where(np.array(dists_sample) >= 1e100)[0] return np.split(np_sample, starting_chromosome_idxs)[1:] def load_json(file_name): return json.load(open(file_name)) def", "samples_counts.append(counts) num_objects = max(num_objects, np.max(objects)) num_objects += 1 count_mat = np.zeros((len(samples), num_objects)) for", ">= 1e100)[0] chromosomes = np.split(np_sample, starting_chromosome_idxs)[1:] chromosomes_dists = np.split(np_dists, starting_chromosome_idxs)[1:] out = []", "samples_to_seq = json_file[u'sampleToSequence'] samples = np.random.permutation(samples) for i in range(n_seqs): seqs.append(samples_to_seq[samples[i]]) seqs_names.append(samples[i]) return", "in seqs: out.extend(seq) return np.array(out) def seqs_to_seq_of_prefix(seqs): out = [] for seq in", "threshold)[0] seqs = np.split(chromosome, starting_seqs_idxs)[1:] out.append(seqs) return out def seqs_to_seq(seqs): out = []", "[] out_names = [] for n in samples: out_names.append(n) out_seqs.append(samples_to_seq[n]) return zip(out_names, out_seqs)", "samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs_by_threshold(samples_to_seq[n], samples_dists[n], threshold)) return zip(out_names, out_seqs) def full_sample_to_chromosomes_seqs_by_threshold(sample, dists_sample, threshold): np_sample", "= json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples_dists = json_file[u'sampleToPrevMutDists'] out_seqs =", "sample_indices_not_in_dir(dir_path): import os samples_in_dir = [f[:-5] for f in os.listdir(dir_path)] samples = get_samples_names()", "json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] # finding num_object + counting num_objects = 0 samples_objects", "= np.where(chromosome_dists >= threshold)[0] seqs = np.split(chromosome, starting_seqs_idxs)[1:] out.append(seqs) return out def seqs_to_seq(seqs):", "= [] for seq in seqs: out.append(seq[0]) return 
np.array(out) def sample_indices_not_in_dir(dir_path): import os", "samples_dists[n], threshold)) return zip(out_names, out_seqs) def full_sample_to_chromosomes_seqs_by_threshold(sample, dists_sample, threshold): np_sample = np.array(sample) np_dists", "samples: out_names.append(n) out_seqs.append(samples_to_seq[n]) return zip(out_names, out_seqs) def get_count_sequences_as_mat(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples =", "out_seqs.append(samples_to_seq[n]) return zip(out_names, out_seqs) def get_count_sequences_as_mat(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq", "json.load(open(file_name)) samples = json_file[u'samples'] return samples def get_split_sequences_by_threshold(file_name, threshold, sample_numbers=None): json_file = json.load(open(file_name))", "range(len(samples)) for i in sample_numbers: n = samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs_by_threshold(samples_to_seq[n], samples_dists[n], threshold)) return", "%(message)s' logging.basicConfig(format=FORMAT) def get_logger(verbosity=logging.INFO): ''' Returns logger object ''' logger = logging.getLogger(__name__) logger.setLevel(verbosity)", "= json_file[u'sampleToSequence'] out_seqs = [] out_names = [] for n in samples: out_names.append(n)", "from pomegranate import * import json ################################################################################ # LOGGING ################################################################################ import logging #", "json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples_dists = json_file[u'sampleToPrevMutDists'] out_seqs", "in range(n_seqs): seqs.append(samples_to_seq[samples[i]]) seqs_names.append(samples[i]) return seqs, seqs_names def to_json(file_name, dict_to_save): 
with open(file_name +", "json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] out_seqs = [] out_names = []", "'w') as fp: json.dump(dict_to_save, fp) def full_sample_to_chromosomes_seqs(sample, dists_sample): np_sample = np.array(sample) starting_chromosome_idxs =", "threshold, sample_numbers=None): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples_dists =", "seq.copy() hits = noise_dist.sample(seqs_len) for j, hit in enumerate(hits): if hit == 0:", "in samples: out_names.append(n) out_seqs.append(samples_to_seq[n]) return zip(out_names, out_seqs) def get_count_sequences_as_mat(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples", "out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs_by_threshold(samples_to_seq[n], samples_dists[n], threshold)) return zip(out_names, out_seqs) def full_sample_to_chromosomes_seqs_by_threshold(sample, dists_sample, threshold): np_sample =", "np.array(dists_sample) starting_chromosome_idxs = np.where(np_dists >= 1e100)[0] chromosomes = np.split(np_sample, starting_chromosome_idxs)[1:] chromosomes_dists = np.split(np_dists,", "return zip(out_names, out_seqs) def get_full_sequences(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq =", "j, hit in enumerate(hits): if hit == 0: noised_seq[j] = noise_change_dist.sample() noised_seqs.append(noised_seq) return", "def get_samples_names(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples'] return samples def get_split_sequences_by_threshold(file_name, threshold,", "a def random_seqs_from_json(file_name, n_seqs=10): seqs = [] seqs_names = [] json_file = json.load(open(file_name))", "0 samples_objects = [] 
samples_counts = [] for sample in samples: objects, counts", "= [] for n in samples: out_names.append(n) out_seqs.append(samples_to_seq[n]) return zip(out_names, out_seqs) def get_count_sequences_as_mat(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'):", "noised_seq = seq.copy() hits = noise_dist.sample(seqs_len) for j, hit in enumerate(hits): if hit", "= logging.getLogger(__name__) logger.setLevel(verbosity) return logger ################################################################################ # UTILS ################################################################################ def sample_and_noise(model, noise_dist, n_seqs,", "seqs_names.append(samples[i]) return seqs, seqs_names def to_json(file_name, dict_to_save): with open(file_name + '.json', 'w') as", "i in sample_numbers: n = samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs(samples_to_seq[n], samples_dists[n])) return zip(out_names, out_seqs) def", "noise_change_dist.sample() noised_seqs.append(noised_seq) return seqs, noised_seqs def get_emissions(file='data\\emissions_for_breast_cancer'): return np.load(file + '.npy') def sample_uniform_between_a_b(n_states,", "LOGGING ################################################################################ import logging # Logging format FORMAT = '%(asctime)s SigMa %(levelname)-10s: %(message)s'", "/ 96] * 96))) seqs = [] noised_seqs = [] for i in", "if sample_numbers is None: sample_numbers = range(len(samples)) for i in sample_numbers: n =", "[] for i in range(len(chromosomes)): chromosome = chromosomes[i] chromosome_dists = chromosomes_dists[i] starting_seqs_idxs =", "DiscreteDistribution(dict(zip(range(96), [1.0 / 96] * 96))) seqs = [] noised_seqs = [] for", "i in range(n_seqs): seq = np.array(model.sample(seqs_len)) seqs.append(seq) noised_seq = seq.copy() hits = noise_dist.sample(seqs_len)", "= samples_counts[i] return count_mat def 
get_samples_names(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples'] return", "out def seqs_to_seq(seqs): out = [] for seq in seqs: out.extend(seq) return np.array(out)", "to_json(file_name, dict_to_save): with open(file_name + '.json', 'w') as fp: json.dump(dict_to_save, fp) def full_sample_to_chromosomes_seqs(sample,", "starting_chromosome_idxs = np.where(np_dists >= 1e100)[0] chromosomes = np.split(np_sample, starting_chromosome_idxs)[1:] chromosomes_dists = np.split(np_dists, starting_chromosome_idxs)[1:]", "1e100)[0] return np.split(np_sample, starting_chromosome_idxs)[1:] def load_json(file_name): return json.load(open(file_name)) def get_split_sequences(file_name, sample_numbers=None): json_file =", "samples = get_samples_names() missing_indices = [] for i in range(len(samples)): if samples[i] not", "= np.split(chromosome, starting_seqs_idxs)[1:] out.append(seqs) return out def seqs_to_seq(seqs): out = [] for seq", "seq in seqs: out.extend(seq) return np.array(out) def seqs_to_seq_of_prefix(seqs): out = [] for seq", "out_seqs = [] out_names = [] for n in samples: out_names.append(n) out_seqs.append(samples_to_seq[n]) return", "sample_and_noise(model, noise_dist, n_seqs, seqs_len): noise_change_dist = DiscreteDistribution(dict(zip(range(96), [1.0 / 96] * 96))) seqs", "num_objects = max(num_objects, np.max(objects)) num_objects += 1 count_mat = np.zeros((len(samples), num_objects)) for i", "+ '.json', 'w') as fp: json.dump(dict_to_save, fp) def full_sample_to_chromosomes_seqs(sample, dists_sample): np_sample = np.array(sample)", "zip(out_names, out_seqs) def full_sample_to_chromosomes_seqs_by_threshold(sample, dists_sample, threshold): np_sample = np.array(sample) np_dists = np.array(dists_sample) starting_chromosome_idxs", "def get_logger(verbosity=logging.INFO): ''' Returns logger object ''' logger = logging.getLogger(__name__) logger.setLevel(verbosity) return logger", "* 
96))) seqs = [] noised_seqs = [] for i in range(n_seqs): seq", "[1.0 / 96] * 96))) seqs = [] noised_seqs = [] for i", "= range(len(samples)) for i in sample_numbers: n = samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs_by_threshold(samples_to_seq[n], samples_dists[n], threshold))", "[] noised_seqs = [] for i in range(n_seqs): seq = np.array(model.sample(seqs_len)) seqs.append(seq) noised_seq", "'%(asctime)s SigMa %(levelname)-10s: %(message)s' logging.basicConfig(format=FORMAT) def get_logger(verbosity=logging.INFO): ''' Returns logger object ''' logger", "= chromosomes_dists[i] starting_seqs_idxs = np.where(chromosome_dists >= threshold)[0] seqs = np.split(chromosome, starting_seqs_idxs)[1:] out.append(seqs) return", "seqs_names def to_json(file_name, dict_to_save): with open(file_name + '.json', 'w') as fp: json.dump(dict_to_save, fp)", "a) * np.random.sample(n_states) + a def random_seqs_from_json(file_name, n_seqs=10): seqs = [] seqs_names =", "n = samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs_by_threshold(samples_to_seq[n], samples_dists[n], threshold)) return zip(out_names, out_seqs) def full_sample_to_chromosomes_seqs_by_threshold(sample, dists_sample,", "range(len(samples)): count_mat[i, samples_objects[i]] = samples_counts[i] return count_mat def get_samples_names(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples", "out_seqs) def full_sample_to_chromosomes_seqs_by_threshold(sample, dists_sample, threshold): np_sample = np.array(sample) np_dists = np.array(dists_sample) starting_chromosome_idxs =", "sample_uniform_between_a_b(n_states, a=0.0, b=1.0): return (b - a) * np.random.sample(n_states) + a def random_seqs_from_json(file_name,", "def get_split_sequences(file_name, sample_numbers=None): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples_dists", 
"json_file[u'samples'] return samples def get_split_sequences_by_threshold(file_name, threshold, sample_numbers=None): json_file = json.load(open(file_name)) samples = json_file[u'samples']", "= json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] # finding num_object + counting", "np.max(objects)) num_objects += 1 count_mat = np.zeros((len(samples), num_objects)) for i in range(len(samples)): count_mat[i,", "f in os.listdir(dir_path)] samples = get_samples_names() missing_indices = [] for i in range(len(samples)):", "in enumerate(hits): if hit == 0: noised_seq[j] = noise_change_dist.sample() noised_seqs.append(noised_seq) return seqs, noised_seqs", "hit in enumerate(hits): if hit == 0: noised_seq[j] = noise_change_dist.sample() noised_seqs.append(noised_seq) return seqs,", "random_seqs_from_json(file_name, n_seqs=10): seqs = [] seqs_names = [] json_file = json.load(open(file_name)) samples =", "seq in seqs: out.append(seq[0]) return np.array(out) def sample_indices_not_in_dir(dir_path): import os samples_in_dir = [f[:-5]", "fp) def full_sample_to_chromosomes_seqs(sample, dists_sample): np_sample = np.array(sample) starting_chromosome_idxs = np.where(np.array(dists_sample) >= 1e100)[0] return", "samples_objects[i]] = samples_counts[i] return count_mat def get_samples_names(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples']", "i in range(len(samples)): count_mat[i, samples_objects[i]] = samples_counts[i] return count_mat def get_samples_names(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file =", "logger ################################################################################ # UTILS ################################################################################ def sample_and_noise(model, noise_dist, n_seqs, seqs_len): noise_change_dist = DiscreteDistribution(dict(zip(range(96),", "n_seqs=10): 
seqs = [] seqs_names = [] json_file = json.load(open(file_name)) samples = json_file[u'samples']", "= json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] out_seqs = [] out_names = [] for n", "Logging format FORMAT = '%(asctime)s SigMa %(levelname)-10s: %(message)s' logging.basicConfig(format=FORMAT) def get_logger(verbosity=logging.INFO): ''' Returns", "np.load(file + '.npy') def sample_uniform_between_a_b(n_states, a=0.0, b=1.0): return (b - a) * np.random.sample(n_states)", "def seqs_to_seq_of_prefix(seqs): out = [] for seq in seqs: out.append(seq[0]) return np.array(out) def", "= [] json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples =", "= [] for sample in samples: objects, counts = np.unique(samples_to_seq[sample], return_counts=True) samples_objects.append(objects) samples_counts.append(counts)", "samples: objects, counts = np.unique(samples_to_seq[sample], return_counts=True) samples_objects.append(objects) samples_counts.append(counts) num_objects = max(num_objects, np.max(objects)) num_objects", "dict_to_save): with open(file_name + '.json', 'w') as fp: json.dump(dict_to_save, fp) def full_sample_to_chromosomes_seqs(sample, dists_sample):", "in seqs: out.append(seq[0]) return np.array(out) def sample_indices_not_in_dir(dir_path): import os samples_in_dir = [f[:-5] for", "json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples_dists = json_file[u'sampleToPrevMutDists'] out_seqs = []", "return np.split(np_sample, starting_chromosome_idxs)[1:] def load_json(file_name): return json.load(open(file_name)) def get_split_sequences(file_name, sample_numbers=None): json_file = json.load(open(file_name))", "################################################################################ # LOGGING ################################################################################ import logging # Logging format FORMAT = 
'%(asctime)s SigMa", "= noise_change_dist.sample() noised_seqs.append(noised_seq) return seqs, noised_seqs def get_emissions(file='data\\emissions_for_breast_cancer'): return np.load(file + '.npy') def", "a=0.0, b=1.0): return (b - a) * np.random.sample(n_states) + a def random_seqs_from_json(file_name, n_seqs=10):", "= np.array(sample) starting_chromosome_idxs = np.where(np.array(dists_sample) >= 1e100)[0] return np.split(np_sample, starting_chromosome_idxs)[1:] def load_json(file_name): return", "max(num_objects, np.max(objects)) num_objects += 1 count_mat = np.zeros((len(samples), num_objects)) for i in range(len(samples)):", "return zip(out_names, out_seqs) def full_sample_to_chromosomes_seqs_by_threshold(sample, dists_sample, threshold): np_sample = np.array(sample) np_dists = np.array(dists_sample)", "################################################################################ def sample_and_noise(model, noise_dist, n_seqs, seqs_len): noise_change_dist = DiscreteDistribution(dict(zip(range(96), [1.0 / 96] *", "def full_sample_to_chromosomes_seqs(sample, dists_sample): np_sample = np.array(sample) starting_chromosome_idxs = np.where(np.array(dists_sample) >= 1e100)[0] return np.split(np_sample,", "+ '.npy') def sample_uniform_between_a_b(n_states, a=0.0, b=1.0): return (b - a) * np.random.sample(n_states) +", "get_split_sequences(file_name, sample_numbers=None): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] samples_dists =", "samples_to_seq = json_file[u'sampleToSequence'] samples_dists = json_file[u'sampleToPrevMutDists'] out_seqs = [] out_names = [] if", "json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] out_seqs = [] out_names = [] for n in", "json_file[u'sampleToSequence'] samples_dists = json_file[u'sampleToPrevMutDists'] out_seqs = [] out_names = [] if sample_numbers is", "in sample_numbers: n = samples[i] out_names.append(n) 
out_seqs.append(full_sample_to_chromosomes_seqs_by_threshold(samples_to_seq[n], samples_dists[n], threshold)) return zip(out_names, out_seqs) def", "= 0 samples_objects = [] samples_counts = [] for sample in samples: objects,", "finding num_object + counting num_objects = 0 samples_objects = [] samples_counts = []", "96))) seqs = [] noised_seqs = [] for i in range(n_seqs): seq =", "for n in samples: out_names.append(n) out_seqs.append(samples_to_seq[n]) return zip(out_names, out_seqs) def get_count_sequences_as_mat(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file =", "np.unique(samples_to_seq[sample], return_counts=True) samples_objects.append(objects) samples_counts.append(counts) num_objects = max(num_objects, np.max(objects)) num_objects += 1 count_mat =", "in os.listdir(dir_path)] samples = get_samples_names() missing_indices = [] for i in range(len(samples)): if", "get_samples_names() missing_indices = [] for i in range(len(samples)): if samples[i] not in samples_in_dir:", "return count_mat def get_samples_names(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples'] return samples def", "'.npy') def sample_uniform_between_a_b(n_states, a=0.0, b=1.0): return (b - a) * np.random.sample(n_states) + a", "range(len(chromosomes)): chromosome = chromosomes[i] chromosome_dists = chromosomes_dists[i] starting_seqs_idxs = np.where(chromosome_dists >= threshold)[0] seqs", "json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] out_seqs = [] out_names", "np.split(chromosome, starting_seqs_idxs)[1:] out.append(seqs) return out def seqs_to_seq(seqs): out = [] for seq in", "= range(len(samples)) for i in sample_numbers: n = samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs(samples_to_seq[n], samples_dists[n])) return", "chromosomes[i] chromosome_dists = chromosomes_dists[i] 
starting_seqs_idxs = np.where(chromosome_dists >= threshold)[0] seqs = np.split(chromosome, starting_seqs_idxs)[1:]", "= np.where(np_dists >= 1e100)[0] chromosomes = np.split(np_sample, starting_chromosome_idxs)[1:] chromosomes_dists = np.split(np_dists, starting_chromosome_idxs)[1:] out", "out_seqs) def get_count_sequences_as_mat(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] #", "= np.split(np_dists, starting_chromosome_idxs)[1:] out = [] for i in range(len(chromosomes)): chromosome = chromosomes[i]", "object ''' logger = logging.getLogger(__name__) logger.setLevel(verbosity) return logger ################################################################################ # UTILS ################################################################################ def", "import logging # Logging format FORMAT = '%(asctime)s SigMa %(levelname)-10s: %(message)s' logging.basicConfig(format=FORMAT) def", "get_count_sequences_as_mat(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] # finding num_object", "def sample_and_noise(model, noise_dist, n_seqs, seqs_len): noise_change_dist = DiscreteDistribution(dict(zip(range(96), [1.0 / 96] * 96)))", "def to_json(file_name, dict_to_save): with open(file_name + '.json', 'w') as fp: json.dump(dict_to_save, fp) def", "get_full_sequences(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] out_seqs = []", "None: sample_numbers = range(len(samples)) for i in sample_numbers: n = samples[i] out_names.append(n) out_seqs.append(full_sample_to_chromosomes_seqs_by_threshold(samples_to_seq[n],", "+ a def random_seqs_from_json(file_name, 
n_seqs=10): seqs = [] seqs_names = [] json_file =", "(b - a) * np.random.sample(n_states) + a def random_seqs_from_json(file_name, n_seqs=10): seqs = []", "as np from pomegranate import * import json ################################################################################ # LOGGING ################################################################################ import", "return zip(out_names, out_seqs) def get_count_sequences_as_mat(file_name='data/nik-zainal2016-wgs-brca-mutations-for-hmm.json'): json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq =", "json_file = json.load(open(file_name)) samples = json_file[u'samples'] samples_to_seq = json_file[u'sampleToSequence'] # finding num_object +", "= [] for i in range(n_seqs): seq = np.array(model.sample(seqs_len)) seqs.append(seq) noised_seq = seq.copy()", "seqs = np.split(chromosome, starting_seqs_idxs)[1:] out.append(seqs) return out def seqs_to_seq(seqs): out = [] for", "= np.array(dists_sample) starting_chromosome_idxs = np.where(np_dists >= 1e100)[0] chromosomes = np.split(np_sample, starting_chromosome_idxs)[1:] chromosomes_dists =", "+= 1 count_mat = np.zeros((len(samples), num_objects)) for i in range(len(samples)): count_mat[i, samples_objects[i]] =", "+ counting num_objects = 0 samples_objects = [] samples_counts = [] for sample", "= np.array(sample) np_dists = np.array(dists_sample) starting_chromosome_idxs = np.where(np_dists >= 1e100)[0] chromosomes = np.split(np_sample,", "seqs: out.extend(seq) return np.array(out) def seqs_to_seq_of_prefix(seqs): out = [] for seq in seqs:", "* import json ################################################################################ # LOGGING ################################################################################ import logging # Logging format FORMAT", "json_file[u'sampleToPrevMutDists'] out_seqs = [] out_names = [] if sample_numbers is None: sample_numbers =", "hit == 0: noised_seq[j] = noise_change_dist.sample() 
noised_seqs.append(noised_seq) return seqs, noised_seqs def get_emissions(file='data\\emissions_for_breast_cancer'): return", "96] * 96))) seqs = [] noised_seqs = [] for i in range(n_seqs):", "samples_counts = [] for sample in samples: objects, counts = np.unique(samples_to_seq[sample], return_counts=True) samples_objects.append(objects)", "get_emissions(file='data\\emissions_for_breast_cancer'): return np.load(file + '.npy') def sample_uniform_between_a_b(n_states, a=0.0, b=1.0): return (b - a)", "# UTILS ################################################################################ def sample_and_noise(model, noise_dist, n_seqs, seqs_len): noise_change_dist = DiscreteDistribution(dict(zip(range(96), [1.0 /", "import * import json ################################################################################ # LOGGING ################################################################################ import logging # Logging format" ]
[ "self.ser.write('ATZ\\r'.encode()) time.sleep(0.5) self.ser.write('AT+CMGF=1\\r'.encode()) time.sleep(0.5) self.ser.write(('''AT+CMGS=\"''' + recipient + '''\"\\r''').encode()) time.sleep(0.5) self.ser.write((message + \"\\r\").encode())", "= message def connectPhone(self): self.ser = serial.Serial('COM7', 9600, timeout=5, xonxoff = False, rtscts", "serial.PARITY_NONE, stopbits = serial.STOPBITS_ONE) time.sleep(1) def sendMessage(self,recipient, message): self.ser.write('ATZ\\r'.encode()) time.sleep(0.5) self.ser.write('AT+CMGF=1\\r'.encode()) time.sleep(0.5) self.ser.write(('''AT+CMGS=\"'''", "= recipient # self.content = message def connectPhone(self): self.ser = serial.Serial('COM7', 9600, timeout=5,", "= serial.PARITY_NONE, stopbits = serial.STOPBITS_ONE) time.sleep(1) def sendMessage(self,recipient, message): self.ser.write('ATZ\\r'.encode()) time.sleep(0.5) self.ser.write('AT+CMGF=1\\r'.encode()) time.sleep(0.5)", "self.ser.write((message + \"\\r\").encode()) time.sleep(0.5) self.ser.write(chr(26).encode()) time.sleep(0.5) def disconnectPhone(self): self.ser.close() sms = TextMessage() sms.connectPhone()", "self.recipient = recipient # self.content = message def connectPhone(self): self.ser = serial.Serial('COM7', 9600,", "serial.STOPBITS_ONE) time.sleep(1) def sendMessage(self,recipient, message): self.ser.write('ATZ\\r'.encode()) time.sleep(0.5) self.ser.write('AT+CMGF=1\\r'.encode()) time.sleep(0.5) self.ser.write(('''AT+CMGS=\"''' + recipient +", "'''\"\\r''').encode()) time.sleep(0.5) self.ser.write((message + \"\\r\").encode()) time.sleep(0.5) self.ser.write(chr(26).encode()) time.sleep(0.5) def disconnectPhone(self): self.ser.close() sms =", "xonxoff = False, rtscts = False, bytesize = serial.EIGHTBITS, parity = serial.PARITY_NONE, stopbits", "TextMessage: # def __init__(self): # self.recipient = recipient # self.content = message def", "self.ser = serial.Serial('COM7', 9600, timeout=5, xonxoff = False, rtscts = False, bytesize =", 
"self.ser.write(chr(26).encode()) time.sleep(0.5) def disconnectPhone(self): self.ser.close() sms = TextMessage() sms.connectPhone() for numbers in num:", "+ \"\\r\").encode()) time.sleep(0.5) self.ser.write(chr(26).encode()) time.sleep(0.5) def disconnectPhone(self): self.ser.close() sms = TextMessage() sms.connectPhone() for", "sendMessage(self,recipient, message): self.ser.write('ATZ\\r'.encode()) time.sleep(0.5) self.ser.write('AT+CMGF=1\\r'.encode()) time.sleep(0.5) self.ser.write(('''AT+CMGS=\"''' + recipient + '''\"\\r''').encode()) time.sleep(0.5) self.ser.write((message", "time import sys,ast message=''; c=' '.join(sys.argv[1:]) num=c.replace(\"[\",\"\").replace(\"]\",\"\").split(\",\") message=num.pop() class TextMessage: # def __init__(self):", "__init__(self): # self.recipient = recipient # self.content = message def connectPhone(self): self.ser =", "= serial.Serial('COM7', 9600, timeout=5, xonxoff = False, rtscts = False, bytesize = serial.EIGHTBITS,", "time.sleep(0.5) self.ser.write('AT+CMGF=1\\r'.encode()) time.sleep(0.5) self.ser.write(('''AT+CMGS=\"''' + recipient + '''\"\\r''').encode()) time.sleep(0.5) self.ser.write((message + \"\\r\").encode()) time.sleep(0.5)", "serial.Serial('COM7', 9600, timeout=5, xonxoff = False, rtscts = False, bytesize = serial.EIGHTBITS, parity", "def sendMessage(self,recipient, message): self.ser.write('ATZ\\r'.encode()) time.sleep(0.5) self.ser.write('AT+CMGF=1\\r'.encode()) time.sleep(0.5) self.ser.write(('''AT+CMGS=\"''' + recipient + '''\"\\r''').encode()) time.sleep(0.5)", "self.ser.write(('''AT+CMGS=\"''' + recipient + '''\"\\r''').encode()) time.sleep(0.5) self.ser.write((message + \"\\r\").encode()) time.sleep(0.5) self.ser.write(chr(26).encode()) time.sleep(0.5) def", "+ '''\"\\r''').encode()) time.sleep(0.5) self.ser.write((message + \"\\r\").encode()) time.sleep(0.5) self.ser.write(chr(26).encode()) time.sleep(0.5) def disconnectPhone(self): self.ser.close() sms", "'.join(sys.argv[1:]) 
num=c.replace(\"[\",\"\").replace(\"]\",\"\").split(\",\") message=num.pop() class TextMessage: # def __init__(self): # self.recipient = recipient #", "serial.EIGHTBITS, parity = serial.PARITY_NONE, stopbits = serial.STOPBITS_ONE) time.sleep(1) def sendMessage(self,recipient, message): self.ser.write('ATZ\\r'.encode()) time.sleep(0.5)", "time.sleep(0.5) self.ser.write(('''AT+CMGS=\"''' + recipient + '''\"\\r''').encode()) time.sleep(0.5) self.ser.write((message + \"\\r\").encode()) time.sleep(0.5) self.ser.write(chr(26).encode()) time.sleep(0.5)", "class TextMessage: # def __init__(self): # self.recipient = recipient # self.content = message", "= serial.STOPBITS_ONE) time.sleep(1) def sendMessage(self,recipient, message): self.ser.write('ATZ\\r'.encode()) time.sleep(0.5) self.ser.write('AT+CMGF=1\\r'.encode()) time.sleep(0.5) self.ser.write(('''AT+CMGS=\"''' + recipient", "recipient + '''\"\\r''').encode()) time.sleep(0.5) self.ser.write((message + \"\\r\").encode()) time.sleep(0.5) self.ser.write(chr(26).encode()) time.sleep(0.5) def disconnectPhone(self): self.ser.close()", "time.sleep(0.5) self.ser.write(chr(26).encode()) time.sleep(0.5) def disconnectPhone(self): self.ser.close() sms = TextMessage() sms.connectPhone() for numbers in", "serial import time import sys,ast message=''; c=' '.join(sys.argv[1:]) num=c.replace(\"[\",\"\").replace(\"]\",\"\").split(\",\") message=num.pop() class TextMessage: #", "False, rtscts = False, bytesize = serial.EIGHTBITS, parity = serial.PARITY_NONE, stopbits = serial.STOPBITS_ONE)", "# self.recipient = recipient # self.content = message def connectPhone(self): self.ser = serial.Serial('COM7',", "message def connectPhone(self): self.ser = serial.Serial('COM7', 9600, timeout=5, xonxoff = False, rtscts =", "def __init__(self): # self.recipient = recipient # self.content = message def connectPhone(self): self.ser", "= serial.EIGHTBITS, parity = serial.PARITY_NONE, stopbits = serial.STOPBITS_ONE) time.sleep(1) def 
sendMessage(self,recipient, message): self.ser.write('ATZ\\r'.encode())", "self.content = message def connectPhone(self): self.ser = serial.Serial('COM7', 9600, timeout=5, xonxoff = False,", "9600, timeout=5, xonxoff = False, rtscts = False, bytesize = serial.EIGHTBITS, parity =", "time.sleep(0.5) def disconnectPhone(self): self.ser.close() sms = TextMessage() sms.connectPhone() for numbers in num: print(numbers)", "num=c.replace(\"[\",\"\").replace(\"]\",\"\").split(\",\") message=num.pop() class TextMessage: # def __init__(self): # self.recipient = recipient # self.content", "sms = TextMessage() sms.connectPhone() for numbers in num: print(numbers) sms.sendMessage(numbers,message) #time.sleep(0.5) sms.disconnectPhone() print", "import time import sys,ast message=''; c=' '.join(sys.argv[1:]) num=c.replace(\"[\",\"\").replace(\"]\",\"\").split(\",\") message=num.pop() class TextMessage: # def", "def connectPhone(self): self.ser = serial.Serial('COM7', 9600, timeout=5, xonxoff = False, rtscts = False,", "= False, rtscts = False, bytesize = serial.EIGHTBITS, parity = serial.PARITY_NONE, stopbits =", "self.ser.write('AT+CMGF=1\\r'.encode()) time.sleep(0.5) self.ser.write(('''AT+CMGS=\"''' + recipient + '''\"\\r''').encode()) time.sleep(0.5) self.ser.write((message + \"\\r\").encode()) time.sleep(0.5) self.ser.write(chr(26).encode())", "= False, bytesize = serial.EIGHTBITS, parity = serial.PARITY_NONE, stopbits = serial.STOPBITS_ONE) time.sleep(1) def", "disconnectPhone(self): self.ser.close() sms = TextMessage() sms.connectPhone() for numbers in num: print(numbers) sms.sendMessage(numbers,message) #time.sleep(0.5)", "self.ser.close() sms = TextMessage() sms.connectPhone() for numbers in num: print(numbers) sms.sendMessage(numbers,message) #time.sleep(0.5) sms.disconnectPhone()", "message=num.pop() class TextMessage: # def __init__(self): # self.recipient = recipient # self.content =", "connectPhone(self): self.ser = serial.Serial('COM7', 9600, timeout=5, xonxoff = 
False, rtscts = False, bytesize", "# self.content = message def connectPhone(self): self.ser = serial.Serial('COM7', 9600, timeout=5, xonxoff =", "bytesize = serial.EIGHTBITS, parity = serial.PARITY_NONE, stopbits = serial.STOPBITS_ONE) time.sleep(1) def sendMessage(self,recipient, message):", "= TextMessage() sms.connectPhone() for numbers in num: print(numbers) sms.sendMessage(numbers,message) #time.sleep(0.5) sms.disconnectPhone() print (\"1\")", "message): self.ser.write('ATZ\\r'.encode()) time.sleep(0.5) self.ser.write('AT+CMGF=1\\r'.encode()) time.sleep(0.5) self.ser.write(('''AT+CMGS=\"''' + recipient + '''\"\\r''').encode()) time.sleep(0.5) self.ser.write((message +", "stopbits = serial.STOPBITS_ONE) time.sleep(1) def sendMessage(self,recipient, message): self.ser.write('ATZ\\r'.encode()) time.sleep(0.5) self.ser.write('AT+CMGF=1\\r'.encode()) time.sleep(0.5) self.ser.write(('''AT+CMGS=\"''' +", "recipient # self.content = message def connectPhone(self): self.ser = serial.Serial('COM7', 9600, timeout=5, xonxoff", "time.sleep(1) def sendMessage(self,recipient, message): self.ser.write('ATZ\\r'.encode()) time.sleep(0.5) self.ser.write('AT+CMGF=1\\r'.encode()) time.sleep(0.5) self.ser.write(('''AT+CMGS=\"''' + recipient + '''\"\\r''').encode())", "+ recipient + '''\"\\r''').encode()) time.sleep(0.5) self.ser.write((message + \"\\r\").encode()) time.sleep(0.5) self.ser.write(chr(26).encode()) time.sleep(0.5) def disconnectPhone(self):", "timeout=5, xonxoff = False, rtscts = False, bytesize = serial.EIGHTBITS, parity = serial.PARITY_NONE,", "import sys,ast message=''; c=' '.join(sys.argv[1:]) num=c.replace(\"[\",\"\").replace(\"]\",\"\").split(\",\") message=num.pop() class TextMessage: # def __init__(self): #", "import serial import time import sys,ast message=''; c=' '.join(sys.argv[1:]) num=c.replace(\"[\",\"\").replace(\"]\",\"\").split(\",\") message=num.pop() class TextMessage:", "time.sleep(0.5) self.ser.write((message + \"\\r\").encode()) time.sleep(0.5) 
self.ser.write(chr(26).encode()) time.sleep(0.5) def disconnectPhone(self): self.ser.close() sms = TextMessage()", "\"\\r\").encode()) time.sleep(0.5) self.ser.write(chr(26).encode()) time.sleep(0.5) def disconnectPhone(self): self.ser.close() sms = TextMessage() sms.connectPhone() for numbers", "c=' '.join(sys.argv[1:]) num=c.replace(\"[\",\"\").replace(\"]\",\"\").split(\",\") message=num.pop() class TextMessage: # def __init__(self): # self.recipient = recipient", "parity = serial.PARITY_NONE, stopbits = serial.STOPBITS_ONE) time.sleep(1) def sendMessage(self,recipient, message): self.ser.write('ATZ\\r'.encode()) time.sleep(0.5) self.ser.write('AT+CMGF=1\\r'.encode())", "False, bytesize = serial.EIGHTBITS, parity = serial.PARITY_NONE, stopbits = serial.STOPBITS_ONE) time.sleep(1) def sendMessage(self,recipient,", "# def __init__(self): # self.recipient = recipient # self.content = message def connectPhone(self):", "def disconnectPhone(self): self.ser.close() sms = TextMessage() sms.connectPhone() for numbers in num: print(numbers) sms.sendMessage(numbers,message)", "message=''; c=' '.join(sys.argv[1:]) num=c.replace(\"[\",\"\").replace(\"]\",\"\").split(\",\") message=num.pop() class TextMessage: # def __init__(self): # self.recipient =", "rtscts = False, bytesize = serial.EIGHTBITS, parity = serial.PARITY_NONE, stopbits = serial.STOPBITS_ONE) time.sleep(1)", "sys,ast message=''; c=' '.join(sys.argv[1:]) num=c.replace(\"[\",\"\").replace(\"]\",\"\").split(\",\") message=num.pop() class TextMessage: # def __init__(self): # self.recipient" ]
[ "world\" @app.route('/youtube/<link>') def youtube(link): print(\"ENTERED\") url = 'https://www.youtube.com/watch?v='+link print(url) result,status = (baritone.pipeline(url,'youtube')) convert", "result,status = (baritone.pipeline(url,'youtube')) convert = { 'url': url, 'text': result, 'converted':status } return", "terminal\") return \"Hello world\" @app.route('/youtube/<link>') def youtube(link): print(\"ENTERED\") url = 'https://www.youtube.com/watch?v='+link print(url) result,status", "print(\"Hello from terminal\") return \"Hello world\" @app.route('/youtube/<link>') def youtube(link): print(\"ENTERED\") url = 'https://www.youtube.com/watch?v='+link", "Flask import baritone import json app = Flask(__name__) @app.route('/') def hello(): print(\"Hello from", "def youtube(link): print(\"ENTERED\") url = 'https://www.youtube.com/watch?v='+link print(url) result,status = (baritone.pipeline(url,'youtube')) convert = {", "hello(): print(\"Hello from terminal\") return \"Hello world\" @app.route('/youtube/<link>') def youtube(link): print(\"ENTERED\") url =", "convert = { 'url': url, 'text': result, 'converted':status } return json.dumps(convert) if __name__", "import json app = Flask(__name__) @app.route('/') def hello(): print(\"Hello from terminal\") return \"Hello", "url = 'https://www.youtube.com/watch?v='+link print(url) result,status = (baritone.pipeline(url,'youtube')) convert = { 'url': url, 'text':", "@app.route('/') def hello(): print(\"Hello from terminal\") return \"Hello world\" @app.route('/youtube/<link>') def youtube(link): print(\"ENTERED\")", "print(\"ENTERED\") url = 'https://www.youtube.com/watch?v='+link print(url) result,status = (baritone.pipeline(url,'youtube')) convert = { 'url': url,", "\"Hello world\" @app.route('/youtube/<link>') def youtube(link): print(\"ENTERED\") url = 'https://www.youtube.com/watch?v='+link print(url) result,status = (baritone.pipeline(url,'youtube'))", "flask import Flask import baritone import json app = 
Flask(__name__) @app.route('/') def hello():", "from flask import Flask import baritone import json app = Flask(__name__) @app.route('/') def", "'url': url, 'text': result, 'converted':status } return json.dumps(convert) if __name__ == '__main__': print(\"Starting", "= Flask(__name__) @app.route('/') def hello(): print(\"Hello from terminal\") return \"Hello world\" @app.route('/youtube/<link>') def", "import baritone import json app = Flask(__name__) @app.route('/') def hello(): print(\"Hello from terminal\")", "@app.route('/youtube/<link>') def youtube(link): print(\"ENTERED\") url = 'https://www.youtube.com/watch?v='+link print(url) result,status = (baritone.pipeline(url,'youtube')) convert =", "'https://www.youtube.com/watch?v='+link print(url) result,status = (baritone.pipeline(url,'youtube')) convert = { 'url': url, 'text': result, 'converted':status", "url, 'text': result, 'converted':status } return json.dumps(convert) if __name__ == '__main__': print(\"Starting server\")", "(baritone.pipeline(url,'youtube')) convert = { 'url': url, 'text': result, 'converted':status } return json.dumps(convert) if", "Flask(__name__) @app.route('/') def hello(): print(\"Hello from terminal\") return \"Hello world\" @app.route('/youtube/<link>') def youtube(link):", "return \"Hello world\" @app.route('/youtube/<link>') def youtube(link): print(\"ENTERED\") url = 'https://www.youtube.com/watch?v='+link print(url) result,status =", "'text': result, 'converted':status } return json.dumps(convert) if __name__ == '__main__': print(\"Starting server\") app.run(host='0.0.0.0')", "{ 'url': url, 'text': result, 'converted':status } return json.dumps(convert) if __name__ == '__main__':", "print(url) result,status = (baritone.pipeline(url,'youtube')) convert = { 'url': url, 'text': result, 'converted':status }", "import Flask import baritone import json app = Flask(__name__) @app.route('/') def hello(): print(\"Hello", "json app = Flask(__name__) @app.route('/') def hello(): 
print(\"Hello from terminal\") return \"Hello world\"", "def hello(): print(\"Hello from terminal\") return \"Hello world\" @app.route('/youtube/<link>') def youtube(link): print(\"ENTERED\") url", "app = Flask(__name__) @app.route('/') def hello(): print(\"Hello from terminal\") return \"Hello world\" @app.route('/youtube/<link>')", "from terminal\") return \"Hello world\" @app.route('/youtube/<link>') def youtube(link): print(\"ENTERED\") url = 'https://www.youtube.com/watch?v='+link print(url)", "= { 'url': url, 'text': result, 'converted':status } return json.dumps(convert) if __name__ ==", "youtube(link): print(\"ENTERED\") url = 'https://www.youtube.com/watch?v='+link print(url) result,status = (baritone.pipeline(url,'youtube')) convert = { 'url':", "= (baritone.pipeline(url,'youtube')) convert = { 'url': url, 'text': result, 'converted':status } return json.dumps(convert)", "= 'https://www.youtube.com/watch?v='+link print(url) result,status = (baritone.pipeline(url,'youtube')) convert = { 'url': url, 'text': result,", "baritone import json app = Flask(__name__) @app.route('/') def hello(): print(\"Hello from terminal\") return" ]
[ "for details. import json import unittest import os.path import requests_mock from pypd import", "PagerDuty. # See LICENSE for details. import json import unittest import os.path import", "self.assertEqual(data[\"routing_keys\"][0], \"<KEY>\") @requests_mock.Mocker() def test_fetch_all_rulesets(self, m): # setup mocked request uris service_url =", "request uris service_url = '{0}/rulesets'.format( self.base_url ) m.register_uri( 'GET', service_url, json=self.rulesets_data, complete_qs=False )", "25 base_path = os.path.join( os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'data') path = os.path.join(base_path, 'sample_rulesets.json') with open(path) as", "'ruleset': self.rulesets, } path = os.path.join(base_path, 'sample_rules.json') with open(path) as f: self.rules_data =", "self.ruleset_data = { 'ruleset': self.rulesets, } path = os.path.join(base_path, 'sample_rules.json') with open(path) as", "test_fetch_all_rulesets(self, m): # setup mocked request uris service_url = '{0}/rulesets'.format( self.base_url ) m.register_uri(", "LICENSE for details. 
import json import unittest import os.path import requests_mock from pypd", "self.rulesets = list(filter( lambda s: s['id'] == self.rulesetid, self.rulesets_data[\"rulesets\"], ))[0] self.ruleset_data = {", "os.path.join( os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'data') path = os.path.join(base_path, 'sample_rulesets.json') with open(path) as f: self.rulesets_data =", "rulesets = Rulesets.fetch(self.rulesetid, api_key=self.api_key) data = rulesets.get_ruleset(self.rulesetid) self.assertEqual(len(data[\"routing_keys\"]), 1) self.assertEqual(data[\"routing_keys\"][0], \"<KEY>\") @requests_mock.Mocker() def", "rulesets.get_ruleset(self.rulesetid) self.assertEqual(len(data[\"routing_keys\"]), 1) self.assertEqual(data[\"routing_keys\"][0], \"<KEY>\") @requests_mock.Mocker() def test_fetch_all_rulesets(self, m): # setup mocked request", "= '{0}/rulesets'.format( self.base_url ) m.register_uri( 'GET', service_url, json=self.rulesets_data, complete_qs=False ) rulesets = Rulesets._fetch_all(api_key=self.api_key)", "self.rules_data = json.load(f) @requests_mock.Mocker() def test_fetch_a_ruleset(self, m): # setup mocked request uris service_url", "# See LICENSE for details. 
import json import unittest import os.path import requests_mock", "uris service_url = '{0}/rulesets'.format( self.base_url ) m.register_uri( 'GET', service_url, json=self.rulesets_data, complete_qs=False ) rulesets", ") m.register_uri( 'GET', service_url, json=self.ruleset_data, complete_qs=False ) rulesets = Rulesets.fetch(self.rulesetid, api_key=self.api_key) data =", "\"<KEY>\") @requests_mock.Mocker() def test_fetch_all_rulesets(self, m): # setup mocked request uris service_url = '{0}/rulesets'.format(", "service_url = '{0}/rulesets/{1}'.format( self.base_url, self.rulesetid, ) m.register_uri( 'GET', service_url, json=self.ruleset_data, complete_qs=False ) rulesets", "'FAUX_API_KEY' self.limit = 25 base_path = os.path.join( os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'data') path = os.path.join(base_path, 'sample_rulesets.json')", "path = os.path.join(base_path, 'sample_rulesets.json') with open(path) as f: self.rulesets_data = json.load(f) self.rulesetid =", "f: self.rules_data = json.load(f) @requests_mock.Mocker() def test_fetch_a_ruleset(self, m): # setup mocked request uris", "self.api_key = 'FAUX_API_KEY' self.limit = 25 base_path = os.path.join( os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'data') path =", "unittest import os.path import requests_mock from pypd import Rulesets class IntegrationTestCase(unittest.TestCase): def setUp(self):", "= os.path.join(base_path, 'sample_rules.json') with open(path) as f: self.rules_data = json.load(f) @requests_mock.Mocker() def test_fetch_a_ruleset(self,", "'https://api.pagerduty.com' self.api_key = 'FAUX_API_KEY' self.limit = 25 base_path = os.path.join( os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'data') path", "# Copyright (c) PagerDuty. # See LICENSE for details. 
import json import unittest", "Rulesets class IntegrationTestCase(unittest.TestCase): def setUp(self): self.base_url = 'https://api.pagerduty.com' self.api_key = 'FAUX_API_KEY' self.limit =", ") m.register_uri( 'GET', service_url, json=self.rulesets_data, complete_qs=False ) rulesets = Rulesets._fetch_all(api_key=self.api_key) data = rulesets[0].get_rulesets()", "(c) PagerDuty. # See LICENSE for details. import json import unittest import os.path", "= \"0e84de00-9511-4380-9f4f-a7b568bb49a0\" self.rulesets = list(filter( lambda s: s['id'] == self.rulesetid, self.rulesets_data[\"rulesets\"], ))[0] self.ruleset_data", "} path = os.path.join(base_path, 'sample_rules.json') with open(path) as f: self.rules_data = json.load(f) @requests_mock.Mocker()", "Copyright (c) PagerDuty. # See LICENSE for details. import json import unittest import", "= os.path.join(base_path, 'sample_rulesets.json') with open(path) as f: self.rulesets_data = json.load(f) self.rulesetid = \"0e84de00-9511-4380-9f4f-a7b568bb49a0\"", "'GET', service_url, json=self.rulesets_data, complete_qs=False ) rulesets = Rulesets._fetch_all(api_key=self.api_key) data = rulesets[0].get_rulesets() self.assertEqual(len(data[0][\"routing_keys\"]), 1)", "# setup mocked request uris service_url = '{0}/rulesets'.format( self.base_url ) m.register_uri( 'GET', service_url,", "list(filter( lambda s: s['id'] == self.rulesetid, self.rulesets_data[\"rulesets\"], ))[0] self.ruleset_data = { 'ruleset': self.rulesets,", "def test_fetch_a_ruleset(self, m): # setup mocked request uris service_url = '{0}/rulesets/{1}'.format( self.base_url, self.rulesetid,", "self.base_url = 'https://api.pagerduty.com' self.api_key = 'FAUX_API_KEY' self.limit = 25 base_path = os.path.join( os.path.dirname(os.path.dirname(os.path.dirname(__file__))),", "'{0}/rulesets'.format( self.base_url ) m.register_uri( 'GET', service_url, json=self.rulesets_data, complete_qs=False ) rulesets = Rulesets._fetch_all(api_key=self.api_key) data", "= os.path.join( 
os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'data') path = os.path.join(base_path, 'sample_rulesets.json') with open(path) as f: self.rulesets_data", "))[0] self.ruleset_data = { 'ruleset': self.rulesets, } path = os.path.join(base_path, 'sample_rules.json') with open(path)", "import os.path import requests_mock from pypd import Rulesets class IntegrationTestCase(unittest.TestCase): def setUp(self): self.base_url", "<gh_stars>0 # Copyright (c) PagerDuty. # See LICENSE for details. import json import", "= json.load(f) self.rulesetid = \"0e84de00-9511-4380-9f4f-a7b568bb49a0\" self.rulesets = list(filter( lambda s: s['id'] == self.rulesetid,", "setUp(self): self.base_url = 'https://api.pagerduty.com' self.api_key = 'FAUX_API_KEY' self.limit = 25 base_path = os.path.join(", "as f: self.rules_data = json.load(f) @requests_mock.Mocker() def test_fetch_a_ruleset(self, m): # setup mocked request", "m): # setup mocked request uris service_url = '{0}/rulesets/{1}'.format( self.base_url, self.rulesetid, ) m.register_uri(", "@requests_mock.Mocker() def test_fetch_a_ruleset(self, m): # setup mocked request uris service_url = '{0}/rulesets/{1}'.format( self.base_url,", "= list(filter( lambda s: s['id'] == self.rulesetid, self.rulesets_data[\"rulesets\"], ))[0] self.ruleset_data = { 'ruleset':", "# setup mocked request uris service_url = '{0}/rulesets/{1}'.format( self.base_url, self.rulesetid, ) m.register_uri( 'GET',", "service_url = '{0}/rulesets'.format( self.base_url ) m.register_uri( 'GET', service_url, json=self.rulesets_data, complete_qs=False ) rulesets =", "with open(path) as f: self.rules_data = json.load(f) @requests_mock.Mocker() def test_fetch_a_ruleset(self, m): # setup", "service_url, json=self.rulesets_data, complete_qs=False ) rulesets = Rulesets._fetch_all(api_key=self.api_key) data = rulesets[0].get_rulesets() self.assertEqual(len(data[0][\"routing_keys\"]), 1) self.assertEqual(data[0][\"routing_keys\"][0],", "self.rulesetid, ) m.register_uri( 
'GET', service_url, json=self.ruleset_data, complete_qs=False ) rulesets = Rulesets.fetch(self.rulesetid, api_key=self.api_key) data", "Rulesets.fetch(self.rulesetid, api_key=self.api_key) data = rulesets.get_ruleset(self.rulesetid) self.assertEqual(len(data[\"routing_keys\"]), 1) self.assertEqual(data[\"routing_keys\"][0], \"<KEY>\") @requests_mock.Mocker() def test_fetch_all_rulesets(self, m):", "self.rulesets, } path = os.path.join(base_path, 'sample_rules.json') with open(path) as f: self.rules_data = json.load(f)", "import requests_mock from pypd import Rulesets class IntegrationTestCase(unittest.TestCase): def setUp(self): self.base_url = 'https://api.pagerduty.com'", "= 'FAUX_API_KEY' self.limit = 25 base_path = os.path.join( os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'data') path = os.path.join(base_path,", "= '{0}/rulesets/{1}'.format( self.base_url, self.rulesetid, ) m.register_uri( 'GET', service_url, json=self.ruleset_data, complete_qs=False ) rulesets =", "test_fetch_a_ruleset(self, m): # setup mocked request uris service_url = '{0}/rulesets/{1}'.format( self.base_url, self.rulesetid, )", "IntegrationTestCase(unittest.TestCase): def setUp(self): self.base_url = 'https://api.pagerduty.com' self.api_key = 'FAUX_API_KEY' self.limit = 25 base_path", "f: self.rulesets_data = json.load(f) self.rulesetid = \"0e84de00-9511-4380-9f4f-a7b568bb49a0\" self.rulesets = list(filter( lambda s: s['id']", "self.base_url ) m.register_uri( 'GET', service_url, json=self.rulesets_data, complete_qs=False ) rulesets = Rulesets._fetch_all(api_key=self.api_key) data =", "{ 'ruleset': self.rulesets, } path = os.path.join(base_path, 'sample_rules.json') with open(path) as f: self.rules_data", "service_url, json=self.ruleset_data, complete_qs=False ) rulesets = Rulesets.fetch(self.rulesetid, api_key=self.api_key) data = rulesets.get_ruleset(self.rulesetid) self.assertEqual(len(data[\"routing_keys\"]), 1)", "\"0e84de00-9511-4380-9f4f-a7b568bb49a0\" self.rulesets = 
list(filter( lambda s: s['id'] == self.rulesetid, self.rulesets_data[\"rulesets\"], ))[0] self.ruleset_data =", "= Rulesets.fetch(self.rulesetid, api_key=self.api_key) data = rulesets.get_ruleset(self.rulesetid) self.assertEqual(len(data[\"routing_keys\"]), 1) self.assertEqual(data[\"routing_keys\"][0], \"<KEY>\") @requests_mock.Mocker() def test_fetch_all_rulesets(self,", "pypd import Rulesets class IntegrationTestCase(unittest.TestCase): def setUp(self): self.base_url = 'https://api.pagerduty.com' self.api_key = 'FAUX_API_KEY'", "os.path.join(base_path, 'sample_rules.json') with open(path) as f: self.rules_data = json.load(f) @requests_mock.Mocker() def test_fetch_a_ruleset(self, m):", "api_key=self.api_key) data = rulesets.get_ruleset(self.rulesetid) self.assertEqual(len(data[\"routing_keys\"]), 1) self.assertEqual(data[\"routing_keys\"][0], \"<KEY>\") @requests_mock.Mocker() def test_fetch_all_rulesets(self, m): #", "@requests_mock.Mocker() def test_fetch_all_rulesets(self, m): # setup mocked request uris service_url = '{0}/rulesets'.format( self.base_url", "path = os.path.join(base_path, 'sample_rules.json') with open(path) as f: self.rules_data = json.load(f) @requests_mock.Mocker() def", "mocked request uris service_url = '{0}/rulesets'.format( self.base_url ) m.register_uri( 'GET', service_url, json=self.rulesets_data, complete_qs=False", "= rulesets.get_ruleset(self.rulesetid) self.assertEqual(len(data[\"routing_keys\"]), 1) self.assertEqual(data[\"routing_keys\"][0], \"<KEY>\") @requests_mock.Mocker() def test_fetch_all_rulesets(self, m): # setup mocked", "'{0}/rulesets/{1}'.format( self.base_url, self.rulesetid, ) m.register_uri( 'GET', service_url, json=self.ruleset_data, complete_qs=False ) rulesets = Rulesets.fetch(self.rulesetid,", "== self.rulesetid, self.rulesets_data[\"rulesets\"], ))[0] self.ruleset_data = { 'ruleset': self.rulesets, } path = os.path.join(base_path,", "self.base_url, self.rulesetid, ) m.register_uri( 'GET', service_url, 
json=self.ruleset_data, complete_qs=False ) rulesets = Rulesets.fetch(self.rulesetid, api_key=self.api_key)", "import json import unittest import os.path import requests_mock from pypd import Rulesets class", "def setUp(self): self.base_url = 'https://api.pagerduty.com' self.api_key = 'FAUX_API_KEY' self.limit = 25 base_path =", "setup mocked request uris service_url = '{0}/rulesets'.format( self.base_url ) m.register_uri( 'GET', service_url, json=self.rulesets_data,", "as f: self.rulesets_data = json.load(f) self.rulesetid = \"0e84de00-9511-4380-9f4f-a7b568bb49a0\" self.rulesets = list(filter( lambda s:", "'sample_rules.json') with open(path) as f: self.rules_data = json.load(f) @requests_mock.Mocker() def test_fetch_a_ruleset(self, m): #", "self.assertEqual(len(data[\"routing_keys\"]), 1) self.assertEqual(data[\"routing_keys\"][0], \"<KEY>\") @requests_mock.Mocker() def test_fetch_all_rulesets(self, m): # setup mocked request uris", "class IntegrationTestCase(unittest.TestCase): def setUp(self): self.base_url = 'https://api.pagerduty.com' self.api_key = 'FAUX_API_KEY' self.limit = 25", "json=self.rulesets_data, complete_qs=False ) rulesets = Rulesets._fetch_all(api_key=self.api_key) data = rulesets[0].get_rulesets() self.assertEqual(len(data[0][\"routing_keys\"]), 1) self.assertEqual(data[0][\"routing_keys\"][0], \"<KEY>\")", "= json.load(f) @requests_mock.Mocker() def test_fetch_a_ruleset(self, m): # setup mocked request uris service_url =", "s: s['id'] == self.rulesetid, self.rulesets_data[\"rulesets\"], ))[0] self.ruleset_data = { 'ruleset': self.rulesets, } path", "import unittest import os.path import requests_mock from pypd import Rulesets class IntegrationTestCase(unittest.TestCase): def", "json.load(f) @requests_mock.Mocker() def test_fetch_a_ruleset(self, m): # setup mocked request uris service_url = '{0}/rulesets/{1}'.format(", "data = rulesets.get_ruleset(self.rulesetid) self.assertEqual(len(data[\"routing_keys\"]), 1) 
self.assertEqual(data[\"routing_keys\"][0], \"<KEY>\") @requests_mock.Mocker() def test_fetch_all_rulesets(self, m): # setup", "with open(path) as f: self.rulesets_data = json.load(f) self.rulesetid = \"0e84de00-9511-4380-9f4f-a7b568bb49a0\" self.rulesets = list(filter(", "= { 'ruleset': self.rulesets, } path = os.path.join(base_path, 'sample_rules.json') with open(path) as f:", "from pypd import Rulesets class IntegrationTestCase(unittest.TestCase): def setUp(self): self.base_url = 'https://api.pagerduty.com' self.api_key =", "'sample_rulesets.json') with open(path) as f: self.rulesets_data = json.load(f) self.rulesetid = \"0e84de00-9511-4380-9f4f-a7b568bb49a0\" self.rulesets =", "self.limit = 25 base_path = os.path.join( os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'data') path = os.path.join(base_path, 'sample_rulesets.json') with", "setup mocked request uris service_url = '{0}/rulesets/{1}'.format( self.base_url, self.rulesetid, ) m.register_uri( 'GET', service_url,", "m.register_uri( 'GET', service_url, json=self.rulesets_data, complete_qs=False ) rulesets = Rulesets._fetch_all(api_key=self.api_key) data = rulesets[0].get_rulesets() self.assertEqual(len(data[0][\"routing_keys\"]),", "base_path = os.path.join( os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'data') path = os.path.join(base_path, 'sample_rulesets.json') with open(path) as f:", "mocked request uris service_url = '{0}/rulesets/{1}'.format( self.base_url, self.rulesetid, ) m.register_uri( 'GET', service_url, json=self.ruleset_data,", "uris service_url = '{0}/rulesets/{1}'.format( self.base_url, self.rulesetid, ) m.register_uri( 'GET', service_url, json=self.ruleset_data, complete_qs=False )", "1) self.assertEqual(data[\"routing_keys\"][0], \"<KEY>\") @requests_mock.Mocker() def test_fetch_all_rulesets(self, m): # setup mocked request uris service_url", "= 25 base_path = os.path.join( os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'data') path = 
os.path.join(base_path, 'sample_rulesets.json') with open(path)", "open(path) as f: self.rulesets_data = json.load(f) self.rulesetid = \"0e84de00-9511-4380-9f4f-a7b568bb49a0\" self.rulesets = list(filter( lambda", "self.rulesets_data[\"rulesets\"], ))[0] self.ruleset_data = { 'ruleset': self.rulesets, } path = os.path.join(base_path, 'sample_rules.json') with", "'data') path = os.path.join(base_path, 'sample_rulesets.json') with open(path) as f: self.rulesets_data = json.load(f) self.rulesetid", "self.rulesets_data = json.load(f) self.rulesetid = \"0e84de00-9511-4380-9f4f-a7b568bb49a0\" self.rulesets = list(filter( lambda s: s['id'] ==", "def test_fetch_all_rulesets(self, m): # setup mocked request uris service_url = '{0}/rulesets'.format( self.base_url )", "self.rulesetid = \"0e84de00-9511-4380-9f4f-a7b568bb49a0\" self.rulesets = list(filter( lambda s: s['id'] == self.rulesetid, self.rulesets_data[\"rulesets\"], ))[0]", "lambda s: s['id'] == self.rulesetid, self.rulesets_data[\"rulesets\"], ))[0] self.ruleset_data = { 'ruleset': self.rulesets, }", "details. 
import json import unittest import os.path import requests_mock from pypd import Rulesets", "os.path.join(base_path, 'sample_rulesets.json') with open(path) as f: self.rulesets_data = json.load(f) self.rulesetid = \"0e84de00-9511-4380-9f4f-a7b568bb49a0\" self.rulesets", "= 'https://api.pagerduty.com' self.api_key = 'FAUX_API_KEY' self.limit = 25 base_path = os.path.join( os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'data')", "m.register_uri( 'GET', service_url, json=self.ruleset_data, complete_qs=False ) rulesets = Rulesets.fetch(self.rulesetid, api_key=self.api_key) data = rulesets.get_ruleset(self.rulesetid)", "'GET', service_url, json=self.ruleset_data, complete_qs=False ) rulesets = Rulesets.fetch(self.rulesetid, api_key=self.api_key) data = rulesets.get_ruleset(self.rulesetid) self.assertEqual(len(data[\"routing_keys\"]),", "open(path) as f: self.rules_data = json.load(f) @requests_mock.Mocker() def test_fetch_a_ruleset(self, m): # setup mocked", "import Rulesets class IntegrationTestCase(unittest.TestCase): def setUp(self): self.base_url = 'https://api.pagerduty.com' self.api_key = 'FAUX_API_KEY' self.limit", "requests_mock from pypd import Rulesets class IntegrationTestCase(unittest.TestCase): def setUp(self): self.base_url = 'https://api.pagerduty.com' self.api_key", "json=self.ruleset_data, complete_qs=False ) rulesets = Rulesets.fetch(self.rulesetid, api_key=self.api_key) data = rulesets.get_ruleset(self.rulesetid) self.assertEqual(len(data[\"routing_keys\"]), 1) self.assertEqual(data[\"routing_keys\"][0],", "json import unittest import os.path import requests_mock from pypd import Rulesets class IntegrationTestCase(unittest.TestCase):", ") rulesets = Rulesets.fetch(self.rulesetid, api_key=self.api_key) data = rulesets.get_ruleset(self.rulesetid) self.assertEqual(len(data[\"routing_keys\"]), 1) self.assertEqual(data[\"routing_keys\"][0], \"<KEY>\") @requests_mock.Mocker()", "complete_qs=False ) rulesets = Rulesets.fetch(self.rulesetid, 
api_key=self.api_key) data = rulesets.get_ruleset(self.rulesetid) self.assertEqual(len(data[\"routing_keys\"]), 1) self.assertEqual(data[\"routing_keys\"][0], \"<KEY>\")", "See LICENSE for details. import json import unittest import os.path import requests_mock from", "os.path import requests_mock from pypd import Rulesets class IntegrationTestCase(unittest.TestCase): def setUp(self): self.base_url =", "s['id'] == self.rulesetid, self.rulesets_data[\"rulesets\"], ))[0] self.ruleset_data = { 'ruleset': self.rulesets, } path =", "request uris service_url = '{0}/rulesets/{1}'.format( self.base_url, self.rulesetid, ) m.register_uri( 'GET', service_url, json=self.ruleset_data, complete_qs=False", "json.load(f) self.rulesetid = \"0e84de00-9511-4380-9f4f-a7b568bb49a0\" self.rulesets = list(filter( lambda s: s['id'] == self.rulesetid, self.rulesets_data[\"rulesets\"],", "self.rulesetid, self.rulesets_data[\"rulesets\"], ))[0] self.ruleset_data = { 'ruleset': self.rulesets, } path = os.path.join(base_path, 'sample_rules.json')", "os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'data') path = os.path.join(base_path, 'sample_rulesets.json') with open(path) as f: self.rulesets_data = json.load(f)", "m): # setup mocked request uris service_url = '{0}/rulesets'.format( self.base_url ) m.register_uri( 'GET'," ]
[]
[ "class BaseMixin: def __init__(self, name: str): self.name = name def __repr__(self): return '<{module}.{cls}", "base classes for AWS\"\"\" class BaseMixin: def __init__(self, name: str): self.name = name", "BaseMixin: def __init__(self, name: str): self.name = name def __repr__(self): return '<{module}.{cls} {name}>'.format(", "def __init__(self, name: str): self.name = name def __repr__(self): return '<{module}.{cls} {name}>'.format( module=self.__class__.__module__,", "AWS\"\"\" class BaseMixin: def __init__(self, name: str): self.name = name def __repr__(self): return", "<reponame>ch41rmn/pylon-oss \"\"\"Some base classes for AWS\"\"\" class BaseMixin: def __init__(self, name: str): self.name", "\"\"\"Some base classes for AWS\"\"\" class BaseMixin: def __init__(self, name: str): self.name =", "str): self.name = name def __repr__(self): return '<{module}.{cls} {name}>'.format( module=self.__class__.__module__, cls=self.__class__.__name__, name=self.name )", "name: str): self.name = name def __repr__(self): return '<{module}.{cls} {name}>'.format( module=self.__class__.__module__, cls=self.__class__.__name__, name=self.name", "classes for AWS\"\"\" class BaseMixin: def __init__(self, name: str): self.name = name def", "__init__(self, name: str): self.name = name def __repr__(self): return '<{module}.{cls} {name}>'.format( module=self.__class__.__module__, cls=self.__class__.__name__,", "for AWS\"\"\" class BaseMixin: def __init__(self, name: str): self.name = name def __repr__(self):" ]
[ "enhance the parser with 'on the fly' data quality audit or export. \"\"\"", "] #start event callbacks self._end_callbacks = [ ] #end event callbacks self._children =", "\"\"\" return dict(self._element_ancestors) def registerStartEventCallback(self, func): \"\"\" Register a callback for start event.", "a unique identifier used at parsing time. - return: identifier \"\"\" self._id +=", "def registerStartEventCallback(self, func): \"\"\" Register a callback for start event. Note that return", "is ignored. Any exception raised by callback is not catched by handler, so", "??? def __enter__(self): \"\"\" Context manager entry point. \"\"\" self._id = 0 #unique", "passing the following arguments: - stack - locator The callbacks for end event", "element being read self._element_tags = Counter() #counter of element tags self._element_ancestors = defaultdict(set)", "element as it has too many children parent = None else: parent =", "#Cleaning identifier, name, attrs = self._stack.pop(-1) del self._children[identifier] def getTagsCount(self): \"\"\" Get a", "information from children. The stack is destroyed when end event occured. This enables", "element in XML dataset. This method is part of of xml.sax.ContentHandler interface and", "This enables to limit memory usage while parsing. The _stack internal variable stores", "self._stack = [ ] #current stack of element being read self._element_tags = Counter()", "are count \"\"\" return dict(self._element_tags) def getTagsAncestors(self): \"\"\" Get a dictionnary with tags", "self._start_callbacks.append(func) def registerEndEventCallback(self, func): \"\"\" Register a callback for end event. 
Note that", "called passing the following arguments: - element name - element children - locator", "any) if parent is not None: self._children[parent].append((name, attrs)) #Initialisation of own children self._children[identifier]", "callbacks for start event will be called passing the following arguments: - stack", "be called passing the following arguments: - element name - element children -", "all different ancestors path \"\"\" return dict(self._element_ancestors) def registerStartEventCallback(self, func): \"\"\" Register a", "self._children = { } #children elements of elements being read return self def", "being read self._element_tags = Counter() #counter of element tags self._element_ancestors = defaultdict(set) #collection", "interface and is overloaded here. - name: tag of element being read \"\"\"", "This enables to enhance the parser with 'on the fly' data quality audit", "as a context manager. The state of object keeps a trace of stack", "keys are tags and values are a sequence of all different ancestors path", "a context manager. The state of object keeps a trace of stack while", "in self._start_callbacks: callback(self._stack, self._locator) def endElement(self, name): \"\"\" Method invoked when ending to", "here. - name: tag of element being read \"\"\" #Get identifier identifier =", "unique identifier used at parsing time. - return: identifier \"\"\" self._id += 1", "(as provided by start event) - element attributes (as provided by start event)", "get the id. try: parent_tuple = self._stack[-1] if parent_tuple[1] == 'osm': #We ignore", "of elements being read return self def __exit__(self, *args): \"\"\" Context manager exit", "and locator as arguments. \"\"\" self._start_callbacks.append(func) def registerEndEventCallback(self, func): \"\"\" Register a callback", "arguments: - stack - locator The callbacks for end event will be called", "when end event occured. This enables to limit memory usage while parsing. 
The", "attributes \"\"\" #Get identifier for current element identifier = self._requestUniqueIdentifier() #Has element a", "itself. - func: a callable object taking stack and locator as arguments. \"\"\"", "children and locator as arguments. \"\"\" self._end_callbacks.append(func) def clearCallbacks(self): \"\"\" Remove all registered", "\"\"\" Method invoked when starting to read an element in XML dataset. This", "registered callbacks for callback in self._start_callbacks: callback(self._stack, self._locator) def endElement(self, name): \"\"\" Method", "#Has element a parent? If yes get the id. try: parent_tuple = self._stack[-1]", "\"\"\" xml.sax.ContentHandler.__init__(self) #super not working here ??? def __enter__(self): \"\"\" Context manager entry", "Register a callback for end event. Note that return value of callback is", "= { } #children elements of elements being read return self def __exit__(self,", "data quality audit or export. \"\"\" class OpenStreetMapXmlHandler(xml.sax.ContentHandler): def __init__(self): \"\"\" Constructor. This", "keeps trace of: - tags count - tags ancestors It is possible to", "context manager. The state of object keeps a trace of stack while parsing.", "callbacks. \"\"\" self._end_callbacks = [ ] self._start_callbacks = [ ] def _requestUniqueIdentifier(self): \"\"\"", "enables to enhance the parser with 'on the fly' data quality audit or", "while parsing. This enables to collect information from children. The stack is destroyed", "] self._start_callbacks = [ ] def _requestUniqueIdentifier(self): \"\"\" Return a unique identifier used", "or export. \"\"\" class OpenStreetMapXmlHandler(xml.sax.ContentHandler): def __init__(self): \"\"\" Constructor. This class is intended", "occured. This enables to limit memory usage while parsing. The _stack internal variable", "will be called passing the following arguments: - stack - locator The callbacks", "ignored. Any exception raised by callback is not catched by handler, so you", "here. 
- name: tag of element being read - attrs: element attributes \"\"\"", "event callbacks self._end_callbacks = [ ] #end event callbacks self._children = { }", "tag of element being read - attrs: element attributes \"\"\" #Get identifier for", "method is part of of xml.sax.ContentHandler interface and is overloaded here. - name:", "exception raised by callback is not catched by handler, so you should take", "return: dictionnary where keys are tags and values are a sequence of all", "functions for start or end events. The callbacks for start event will be", "Counter, defaultdict \"\"\" Custom handler for parsing OpenStreetMap XML files. While parsing the", "for end event will be called passing the following arguments: - element name", "attributes (as provided by start event) \"\"\" xml.sax.ContentHandler.__init__(self) #super not working here ???", "= None #Exploit current stack to get ancestor ancestor = \".\".join([s[1] for s", "where keys are tags and values are a sequence of all different ancestors", "keys are tags and values are count \"\"\" return dict(self._element_tags) def getTagsAncestors(self): \"\"\"", "[ ] #Update stack self._stack.append((identifier, name, attrs)) #Use registered callbacks for callback in", "for parsing OpenStreetMap XML files. While parsing the XML file, handler keeps trace", "ancestors. - return: dictionnary where keys are tags and values are a sequence", "the callback itself. - func: a callable object taking stack and locator as", "return value of callback is ignored. Any exception raised by callback is not", "the id. try: parent_tuple = self._stack[-1] if parent_tuple[1] == 'osm': #We ignore osm", "== 'osm': #We ignore osm element as it has too many children parent", "= [ ] def _requestUniqueIdentifier(self): \"\"\" Return a unique identifier used at parsing", "take care of catching all exceptions within the callback itself. 
- func: a", "self._locator) #Cleaning identifier, name, attrs = self._stack.pop(-1) del self._children[identifier] def getTagsCount(self): \"\"\" Get", "= defaultdict(set) #collection of ancestors per tag self._start_callbacks = [ ] #start event", "\"\"\" Register a callback for end event. Note that return value of callback", "parsing. This enables to collect information from children. The stack is destroyed when", "identifier incremented at self._stack = [ ] #current stack of element being read", "are tags and values are count \"\"\" return dict(self._element_tags) def getTagsAncestors(self): \"\"\" Get", "XML file, handler keeps trace of: - tags count - tags ancestors It", "IndexError: parent = None #Exploit current stack to get ancestor ancestor = \".\".join([s[1]", "handler class. This enables to enhance the parser with 'on the fly' data", "is ignored by the handler class. This enables to enhance the parser with", "\"\"\" #Get identifier identifier = self._stack[-1][0] #Use registered callbacks before element is cleaned", "= [ ] #current stack of element being read self._element_tags = Counter() #counter", "being read - attrs: element attributes \"\"\" #Get identifier for current element identifier", "The _stack internal variable stores tuples - element unique identifier - element name", "callback for end event. Note that return value of callback is ignored. Any", "- locator The callbacks for end event will be called passing the following", "This method is part of of xml.sax.ContentHandler interface and is overloaded here. 
-", "arguments: - element name - element children - locator Return value of callbacks", "self._children[identifier], self._locator) #Cleaning identifier, name, attrs = self._stack.pop(-1) del self._children[identifier] def getTagsCount(self): \"\"\"", "Any exception raised by callback is not catched by handler, so you should", "= parent_tuple[0] except IndexError: parent = None #Exploit current stack to get ancestor", "be used as a context manager. The state of object keeps a trace", "too many children parent = None else: parent = parent_tuple[0] except IndexError: parent", "name, element children and locator as arguments. \"\"\" self._end_callbacks.append(func) def clearCallbacks(self): \"\"\" Remove", "element name - element children - locator Return value of callbacks is ignored", "in self._end_callbacks: callback(name, self._children[identifier], self._locator) #Cleaning identifier, name, attrs = self._stack.pop(-1) del self._children[identifier]", "- func: a callable object taking stack and locator as arguments. \"\"\" self._start_callbacks.append(func)", "del self._children[identifier] def getTagsCount(self): \"\"\" Get a dictionnary with tags count. - return:", "XML dataset. This method is part of of xml.sax.ContentHandler interface and is overloaded", "before element is cleaned for callback in self._end_callbacks: callback(name, self._children[identifier], self._locator) #Cleaning identifier,", "Return a unique identifier used at parsing time. - return: identifier \"\"\" self._id", "event occured. This enables to limit memory usage while parsing. The _stack internal", "tags count - tags ancestors It is possible to register callback functions for", "as arguments. 
\"\"\" self._start_callbacks.append(func) def registerEndEventCallback(self, func): \"\"\" Register a callback for end", "callbacks self._end_callbacks = [ ] #end event callbacks self._children = { } #children", "} #children elements of elements being read return self def __exit__(self, *args): \"\"\"", "\"\"\" #Get identifier for current element identifier = self._requestUniqueIdentifier() #Has element a parent?", "audit or export. \"\"\" class OpenStreetMapXmlHandler(xml.sax.ContentHandler): def __init__(self): \"\"\" Constructor. This class is", "of stack while parsing. This enables to collect information from children. The stack", "- name: tag of element being read \"\"\" #Get identifier identifier = self._stack[-1][0]", "for start event will be called passing the following arguments: - stack -", "exceptions within the callback itself. - func: a callable object taking stack and", "being read return self def __exit__(self, *args): \"\"\" Context manager exit point. \"\"\"", "self._element_ancestors[name].add(ancestor) #Update tag counter self._element_tags[name] += 1 #Update parent children (if any) if", "\"\"\" self._end_callbacks.append(func) def clearCallbacks(self): \"\"\" Remove all registered callbacks. \"\"\" self._end_callbacks = [", "#We ignore osm element as it has too many children parent = None", "a sequence of all different ancestors path \"\"\" return dict(self._element_ancestors) def registerStartEventCallback(self, func):", "- stack - locator The callbacks for end event will be called passing", "value of callbacks is ignored by the handler class. 
This enables to enhance", "incremented at self._stack = [ ] #current stack of element being read self._element_tags", "not None: self._children[parent].append((name, attrs)) #Initialisation of own children self._children[identifier] = [ ] #Update", "callback(self._stack, self._locator) def endElement(self, name): \"\"\" Method invoked when ending to read an", "count - tags ancestors It is possible to register callback functions for start", "- element unique identifier - element name (as provided by start event) -", "by callback is not catched by handler, so you should take care of", "import Counter, defaultdict \"\"\" Custom handler for parsing OpenStreetMap XML files. While parsing", "registered callbacks before element is cleaned for callback in self._end_callbacks: callback(name, self._children[identifier], self._locator)", "a callable object taking stack and locator as arguments. \"\"\" self._start_callbacks.append(func) def registerEndEventCallback(self,", "tags ancestors It is possible to register callback functions for start or end", "[ ] def _requestUniqueIdentifier(self): \"\"\" Return a unique identifier used at parsing time.", "taking stack and locator as arguments. \"\"\" self._start_callbacks.append(func) def registerEndEventCallback(self, func): \"\"\" Register", "is part of of xml.sax.ContentHandler interface and is overloaded here. - name: tag", "called passing the following arguments: - stack - locator The callbacks for end", "\"\"\" Method invoked when ending to read an element in XML dataset. This", "parent_tuple[1] == 'osm': #We ignore osm element as it has too many children", "def __init__(self): \"\"\" Constructor. This class is intended to be used as a", "None: self._children[parent].append((name, attrs)) #Initialisation of own children self._children[identifier] = [ ] #Update stack", "of object keeps a trace of stack while parsing. 
This enables to collect", "read - attrs: element attributes \"\"\" #Get identifier for current element identifier =", "to collect information from children. The stack is destroyed when end event occured.", "Get a dictionnary with tags count. - return: dictionnary where keys are tags", "exceptions within the callback itself. - func: a callable object taking element name,", "and values are count \"\"\" return dict(self._element_tags) def getTagsAncestors(self): \"\"\" Get a dictionnary", "self._id = 0 #unique identifier incremented at self._stack = [ ] #current stack", "name: tag of element being read - attrs: element attributes \"\"\" #Get identifier", "trace of: - tags count - tags ancestors It is possible to register", "func: a callable object taking element name, element children and locator as arguments.", "parent_tuple = self._stack[-1] if parent_tuple[1] == 'osm': #We ignore osm element as it", "to get ancestor ancestor = \".\".join([s[1] for s in self._stack]) self._element_ancestors[name].add(ancestor) #Update tag", "handler, so you should take care of catching all exceptions within the callback", "= [ ] #end event callbacks self._children = { } #children elements of", "*args): \"\"\" Context manager exit point. \"\"\" pass def startElement(self, name, attrs): \"\"\"", "func): \"\"\" Register a callback for start event. Note that return value of", "#Use registered callbacks before element is cleaned for callback in self._end_callbacks: callback(name, self._children[identifier],", "return self def __exit__(self, *args): \"\"\" Context manager exit point. \"\"\" pass def", "\"\"\" Context manager entry point. \"\"\" self._id = 0 #unique identifier incremented at", "end event will be called passing the following arguments: - element name -", "elements of elements being read return self def __exit__(self, *args): \"\"\" Context manager", "dataset. 
This method is part of of xml.sax.ContentHandler interface and is overloaded here.", "except IndexError: parent = None #Exploit current stack to get ancestor ancestor =", "callback itself. - func: a callable object taking stack and locator as arguments.", "possible to register callback functions for start or end events. The callbacks for", "element identifier = self._requestUniqueIdentifier() #Has element a parent? If yes get the id.", "dict(self._element_ancestors) def registerStartEventCallback(self, func): \"\"\" Register a callback for start event. Note that", "= Counter() #counter of element tags self._element_ancestors = defaultdict(set) #collection of ancestors per", "tag of element being read \"\"\" #Get identifier identifier = self._stack[-1][0] #Use registered", "callback(name, self._children[identifier], self._locator) #Cleaning identifier, name, attrs = self._stack.pop(-1) del self._children[identifier] def getTagsCount(self):", "= None else: parent = parent_tuple[0] except IndexError: parent = None #Exploit current", "is not catched by handler, so you should take care of catching all", "] def _requestUniqueIdentifier(self): \"\"\" Return a unique identifier used at parsing time. -", "usage while parsing. The _stack internal variable stores tuples - element unique identifier", "] #end event callbacks self._children = { } #children elements of elements being", "= [ ] #Update stack self._stack.append((identifier, name, attrs)) #Use registered callbacks for callback", "count. - return: dictionnary where keys are tags and values are count \"\"\"", "a callback for end event. Note that return value of callback is ignored.", "\"\"\" self._end_callbacks = [ ] self._start_callbacks = [ ] def _requestUniqueIdentifier(self): \"\"\" Return", "self._children[parent].append((name, attrs)) #Initialisation of own children self._children[identifier] = [ ] #Update stack self._stack.append((identifier,", "manager exit point. 
\"\"\" pass def startElement(self, name, attrs): \"\"\" Method invoked when", "#Initialisation of own children self._children[identifier] = [ ] #Update stack self._stack.append((identifier, name, attrs))", "per tag self._start_callbacks = [ ] #start event callbacks self._end_callbacks = [ ]", "as arguments. \"\"\" self._end_callbacks.append(func) def clearCallbacks(self): \"\"\" Remove all registered callbacks. \"\"\" self._end_callbacks", "defaultdict(set) #collection of ancestors per tag self._start_callbacks = [ ] #start event callbacks", "xml.sax.ContentHandler.__init__(self) #super not working here ??? def __enter__(self): \"\"\" Context manager entry point.", "a callback for start event. Note that return value of callback is ignored.", "where keys are tags and values are count \"\"\" return dict(self._element_tags) def getTagsAncestors(self):", "with 'on the fly' data quality audit or export. \"\"\" class OpenStreetMapXmlHandler(xml.sax.ContentHandler): def", "entry point. \"\"\" self._id = 0 #unique identifier incremented at self._stack = [", "\"\"\" self._id = 0 #unique identifier incremented at self._stack = [ ] #current", "by start event) \"\"\" xml.sax.ContentHandler.__init__(self) #super not working here ??? def __enter__(self): \"\"\"", "following arguments: - element name - element children - locator Return value of", "\"\"\" class OpenStreetMapXmlHandler(xml.sax.ContentHandler): def __init__(self): \"\"\" Constructor. This class is intended to be", "- element name - element children - locator Return value of callbacks is", "handler keeps trace of: - tags count - tags ancestors It is possible", "self._requestUniqueIdentifier() #Has element a parent? If yes get the id. try: parent_tuple =", "def startElement(self, name, attrs): \"\"\" Method invoked when starting to read an element", "self._children[identifier] def getTagsCount(self): \"\"\" Get a dictionnary with tags count. 
- return: dictionnary", "has too many children parent = None else: parent = parent_tuple[0] except IndexError:", "- name: tag of element being read - attrs: element attributes \"\"\" #Get", "'osm': #We ignore osm element as it has too many children parent =", "_stack internal variable stores tuples - element unique identifier - element name (as", "of element being read self._element_tags = Counter() #counter of element tags self._element_ancestors =", "collections import Counter, defaultdict \"\"\" Custom handler for parsing OpenStreetMap XML files. While", "for s in self._stack]) self._element_ancestors[name].add(ancestor) #Update tag counter self._element_tags[name] += 1 #Update parent", "attrs)) #Use registered callbacks for callback in self._start_callbacks: callback(self._stack, self._locator) def endElement(self, name):", "tags self._element_ancestors = defaultdict(set) #collection of ancestors per tag self._start_callbacks = [ ]", "it has too many children parent = None else: parent = parent_tuple[0] except", "getTagsAncestors(self): \"\"\" Get a dictionnary with tags ancestors. - return: dictionnary where keys", "file, handler keeps trace of: - tags count - tags ancestors It is", "different ancestors path \"\"\" return dict(self._element_ancestors) def registerStartEventCallback(self, func): \"\"\" Register a callback", "sequence of all different ancestors path \"\"\" return dict(self._element_ancestors) def registerStartEventCallback(self, func): \"\"\"", "name - element children - locator Return value of callbacks is ignored by", "object keeps a trace of stack while parsing. This enables to collect information", "not catched by handler, so you should take care of catching all exceptions", "def getTagsCount(self): \"\"\" Get a dictionnary with tags count. - return: dictionnary where", "by the handler class. This enables to enhance the parser with 'on the", "yes get the id. 
try: parent_tuple = self._stack[-1] if parent_tuple[1] == 'osm': #We", "get ancestor ancestor = \".\".join([s[1] for s in self._stack]) self._element_ancestors[name].add(ancestor) #Update tag counter", "\"\"\" Constructor. This class is intended to be used as a context manager.", "self._end_callbacks = [ ] #end event callbacks self._children = { } #children elements", "within the callback itself. - func: a callable object taking element name, element", "ancestor = \".\".join([s[1] for s in self._stack]) self._element_ancestors[name].add(ancestor) #Update tag counter self._element_tags[name] +=", "callback itself. - func: a callable object taking element name, element children and", "of all different ancestors path \"\"\" return dict(self._element_ancestors) def registerStartEventCallback(self, func): \"\"\" Register", "provided by start event) \"\"\" xml.sax.ContentHandler.__init__(self) #super not working here ??? def __enter__(self):", "end event. Note that return value of callback is ignored. Any exception raised", "parent = parent_tuple[0] except IndexError: parent = None #Exploit current stack to get", "locator The callbacks for end event will be called passing the following arguments:", "The callbacks for start event will be called passing the following arguments: -", "def registerEndEventCallback(self, func): \"\"\" Register a callback for end event. Note that return", "from children. The stack is destroyed when end event occured. This enables to", "callbacks self._children = { } #children elements of elements being read return self", "children. The stack is destroyed when end event occured. This enables to limit", "Method invoked when starting to read an element in XML dataset. This method", "callback for start event. Note that return value of callback is ignored. 
Any", "self._start_callbacks: callback(self._stack, self._locator) def endElement(self, name): \"\"\" Method invoked when ending to read", "OpenStreetMapXmlHandler(xml.sax.ContentHandler): def __init__(self): \"\"\" Constructor. This class is intended to be used as", "all registered callbacks. \"\"\" self._end_callbacks = [ ] self._start_callbacks = [ ] def", "start event will be called passing the following arguments: - stack - locator", "callbacks before element is cleaned for callback in self._end_callbacks: callback(name, self._children[identifier], self._locator) #Cleaning", "itself. - func: a callable object taking element name, element children and locator", "This enables to collect information from children. The stack is destroyed when end", "OpenStreetMap XML files. While parsing the XML file, handler keeps trace of: -", "name: tag of element being read \"\"\" #Get identifier identifier = self._stack[-1][0] #Use", "internal variable stores tuples - element unique identifier - element name (as provided", "The stack is destroyed when end event occured. This enables to limit memory", "] #Update stack self._stack.append((identifier, name, attrs)) #Use registered callbacks for callback in self._start_callbacks:", "\".\".join([s[1] for s in self._stack]) self._element_ancestors[name].add(ancestor) #Update tag counter self._element_tags[name] += 1 #Update", "#counter of element tags self._element_ancestors = defaultdict(set) #collection of ancestors per tag self._start_callbacks", "def getTagsAncestors(self): \"\"\" Get a dictionnary with tags ancestors. - return: dictionnary where", "<gh_stars>1-10 import xml.sax from collections import Counter, defaultdict \"\"\" Custom handler for parsing", "func: a callable object taking stack and locator as arguments. 
\"\"\" self._start_callbacks.append(func) def", "[ ] #start event callbacks self._end_callbacks = [ ] #end event callbacks self._children", "children self._children[identifier] = [ ] #Update stack self._stack.append((identifier, name, attrs)) #Use registered callbacks", "destroyed when end event occured. This enables to limit memory usage while parsing.", "endElement(self, name): \"\"\" Method invoked when ending to read an element in XML", "enables to limit memory usage while parsing. The _stack internal variable stores tuples", "stack to get ancestor ancestor = \".\".join([s[1] for s in self._stack]) self._element_ancestors[name].add(ancestor) #Update", "tags ancestors. - return: dictionnary where keys are tags and values are a", "= self._requestUniqueIdentifier() #Has element a parent? If yes get the id. try: parent_tuple", "return dict(self._element_ancestors) def registerStartEventCallback(self, func): \"\"\" Register a callback for start event. Note", "stack is destroyed when end event occured. This enables to limit memory usage", "children - locator Return value of callbacks is ignored by the handler class.", "ancestors path \"\"\" return dict(self._element_ancestors) def registerStartEventCallback(self, func): \"\"\" Register a callback for", "Custom handler for parsing OpenStreetMap XML files. While parsing the XML file, handler", "element name, element children and locator as arguments. \"\"\" self._end_callbacks.append(func) def clearCallbacks(self): \"\"\"", "a trace of stack while parsing. This enables to collect information from children.", "It is possible to register callback functions for start or end events. The", "dictionnary with tags ancestors. - return: dictionnary where keys are tags and values", "name, attrs): \"\"\" Method invoked when starting to read an element in XML", "class is intended to be used as a context manager. 
The state of", "#current stack of element being read self._element_tags = Counter() #counter of element tags", "'on the fly' data quality audit or export. \"\"\" class OpenStreetMapXmlHandler(xml.sax.ContentHandler): def __init__(self):", "manager. The state of object keeps a trace of stack while parsing. This", "name, attrs)) #Use registered callbacks for callback in self._start_callbacks: callback(self._stack, self._locator) def endElement(self,", "def clearCallbacks(self): \"\"\" Remove all registered callbacks. \"\"\" self._end_callbacks = [ ] self._start_callbacks", "\"\"\" Context manager exit point. \"\"\" pass def startElement(self, name, attrs): \"\"\" Method", "Constructor. This class is intended to be used as a context manager. The", "collect information from children. The stack is destroyed when end event occured. This", "\"\"\" pass def startElement(self, name, attrs): \"\"\" Method invoked when starting to read", "within the callback itself. - func: a callable object taking stack and locator", "self._element_tags[name] += 1 #Update parent children (if any) if parent is not None:", "callback in self._start_callbacks: callback(self._stack, self._locator) def endElement(self, name): \"\"\" Method invoked when ending", "is overloaded here. - name: tag of element being read \"\"\" #Get identifier", "event will be called passing the following arguments: - stack - locator The", "a parent? If yes get the id. try: parent_tuple = self._stack[-1] if parent_tuple[1]", "of catching all exceptions within the callback itself. - func: a callable object", "by handler, so you should take care of catching all exceptions within the", "parent children (if any) if parent is not None: self._children[parent].append((name, attrs)) #Initialisation of", "while parsing. 
The _stack internal variable stores tuples - element unique identifier -", "of element being read \"\"\" #Get identifier identifier = self._stack[-1][0] #Use registered callbacks", "callback in self._end_callbacks: callback(name, self._children[identifier], self._locator) #Cleaning identifier, name, attrs = self._stack.pop(-1) del", "dictionnary where keys are tags and values are count \"\"\" return dict(self._element_tags) def", "all exceptions within the callback itself. - func: a callable object taking stack", "of: - tags count - tags ancestors It is possible to register callback", "self._end_callbacks: callback(name, self._children[identifier], self._locator) #Cleaning identifier, name, attrs = self._stack.pop(-1) del self._children[identifier] def", "value of callback is ignored. Any exception raised by callback is not catched", "parsing the XML file, handler keeps trace of: - tags count - tags", "locator Return value of callbacks is ignored by the handler class. This enables", "element name (as provided by start event) - element attributes (as provided by", "\"\"\" Return a unique identifier used at parsing time. - return: identifier \"\"\"", "self._element_tags = Counter() #counter of element tags self._element_ancestors = defaultdict(set) #collection of ancestors", "stores tuples - element unique identifier - element name (as provided by start", "If yes get the id. try: parent_tuple = self._stack[-1] if parent_tuple[1] == 'osm':", "register callback functions for start or end events. The callbacks for start event", "attrs): \"\"\" Method invoked when starting to read an element in XML dataset.", "of xml.sax.ContentHandler interface and is overloaded here. 
- name: tag of element being", "[ ] #end event callbacks self._children = { } #children elements of elements", "event callbacks self._children = { } #children elements of elements being read return", "try: parent_tuple = self._stack[-1] if parent_tuple[1] == 'osm': #We ignore osm element as", "current stack to get ancestor ancestor = \".\".join([s[1] for s in self._stack]) self._element_ancestors[name].add(ancestor)", "Remove all registered callbacks. \"\"\" self._end_callbacks = [ ] self._start_callbacks = [ ]", "callbacks for end event will be called passing the following arguments: - element", "event) - element attributes (as provided by start event) \"\"\" xml.sax.ContentHandler.__init__(self) #super not", "\"\"\" Get a dictionnary with tags ancestors. - return: dictionnary where keys are", "events. The callbacks for start event will be called passing the following arguments:", "quality audit or export. \"\"\" class OpenStreetMapXmlHandler(xml.sax.ContentHandler): def __init__(self): \"\"\" Constructor. This class", "with tags count. - return: dictionnary where keys are tags and values are", "startElement(self, name, attrs): \"\"\" Method invoked when starting to read an element in", "for current element identifier = self._requestUniqueIdentifier() #Has element a parent? If yes get", "the callback itself. - func: a callable object taking element name, element children", "#unique identifier incremented at self._stack = [ ] #current stack of element being", "start or end events. The callbacks for start event will be called passing", "catched by handler, so you should take care of catching all exceptions within", "return: dictionnary where keys are tags and values are count \"\"\" return dict(self._element_tags)", "keeps a trace of stack while parsing. This enables to collect information from", "read return self def __exit__(self, *args): \"\"\" Context manager exit point. 
\"\"\" pass", "are a sequence of all different ancestors path \"\"\" return dict(self._element_ancestors) def registerStartEventCallback(self,", "is intended to be used as a context manager. The state of object", "arguments. \"\"\" self._start_callbacks.append(func) def registerEndEventCallback(self, func): \"\"\" Register a callback for end event.", "a callable object taking element name, element children and locator as arguments. \"\"\"", "Register a callback for start event. Note that return value of callback is", "the XML file, handler keeps trace of: - tags count - tags ancestors", "element tags self._element_ancestors = defaultdict(set) #collection of ancestors per tag self._start_callbacks = [", "defaultdict \"\"\" Custom handler for parsing OpenStreetMap XML files. While parsing the XML", "of element being read - attrs: element attributes \"\"\" #Get identifier for current", "when starting to read an element in XML dataset. This method is part", "+= 1 #Update parent children (if any) if parent is not None: self._children[parent].append((name,", "Counter() #counter of element tags self._element_ancestors = defaultdict(set) #collection of ancestors per tag", "ancestor ancestor = \".\".join([s[1] for s in self._stack]) self._element_ancestors[name].add(ancestor) #Update tag counter self._element_tags[name]", "dictionnary where keys are tags and values are a sequence of all different", "element a parent? If yes get the id. try: parent_tuple = self._stack[-1] if", "of ancestors per tag self._start_callbacks = [ ] #start event callbacks self._end_callbacks =", "provided by start event) - element attributes (as provided by start event) \"\"\"", "self._start_callbacks = [ ] def _requestUniqueIdentifier(self): \"\"\" Return a unique identifier used at", "end event occured. This enables to limit memory usage while parsing. The _stack", "callable object taking element name, element children and locator as arguments. 
\"\"\" self._end_callbacks.append(func)", "self._stack]) self._element_ancestors[name].add(ancestor) #Update tag counter self._element_tags[name] += 1 #Update parent children (if any)", "= self._stack.pop(-1) del self._children[identifier] def getTagsCount(self): \"\"\" Get a dictionnary with tags count.", "at self._stack = [ ] #current stack of element being read self._element_tags =", "used as a context manager. The state of object keeps a trace of", "- return: dictionnary where keys are tags and values are count \"\"\" return", "#super not working here ??? def __enter__(self): \"\"\" Context manager entry point. \"\"\"", "object taking stack and locator as arguments. \"\"\" self._start_callbacks.append(func) def registerEndEventCallback(self, func): \"\"\"", "xml.sax.ContentHandler interface and is overloaded here. - name: tag of element being read", "parsing OpenStreetMap XML files. While parsing the XML file, handler keeps trace of:", "#start event callbacks self._end_callbacks = [ ] #end event callbacks self._children = {", "parent = None #Exploit current stack to get ancestor ancestor = \".\".join([s[1] for", "enables to collect information from children. The stack is destroyed when end event", "identifier used at parsing time. - return: identifier \"\"\" self._id += 1 return", "self._stack[-1][0] #Use registered callbacks before element is cleaned for callback in self._end_callbacks: callback(name,", "attrs: element attributes \"\"\" #Get identifier for current element identifier = self._requestUniqueIdentifier() #Has", "\"\"\" self._start_callbacks.append(func) def registerEndEventCallback(self, func): \"\"\" Register a callback for end event. Note", "#children elements of elements being read return self def __exit__(self, *args): \"\"\" Context", "be called passing the following arguments: - stack - locator The callbacks for", "#collection of ancestors per tag self._start_callbacks = [ ] #start event callbacks self._end_callbacks", "callback is ignored. 
Any exception raised by callback is not catched by handler,", "- func: a callable object taking element name, element children and locator as", "to read an element in XML dataset. This method is part of of", "being read \"\"\" #Get identifier identifier = self._stack[-1][0] #Use registered callbacks before element", "else: parent = parent_tuple[0] except IndexError: parent = None #Exploit current stack to", "parent_tuple[0] except IndexError: parent = None #Exploit current stack to get ancestor ancestor", "overloaded here. - name: tag of element being read \"\"\" #Get identifier identifier", "tags count. - return: dictionnary where keys are tags and values are count", "variable stores tuples - element unique identifier - element name (as provided by", "identifier = self._requestUniqueIdentifier() #Has element a parent? If yes get the id. try:", "#Get identifier identifier = self._stack[-1][0] #Use registered callbacks before element is cleaned for", "export. \"\"\" class OpenStreetMapXmlHandler(xml.sax.ContentHandler): def __init__(self): \"\"\" Constructor. This class is intended to", "Note that return value of callback is ignored. Any exception raised by callback", "event) \"\"\" xml.sax.ContentHandler.__init__(self) #super not working here ??? def __enter__(self): \"\"\" Context manager", "return dict(self._element_tags) def getTagsAncestors(self): \"\"\" Get a dictionnary with tags ancestors. - return:", "class OpenStreetMapXmlHandler(xml.sax.ContentHandler): def __init__(self): \"\"\" Constructor. This class is intended to be used", "identifier identifier = self._stack[-1][0] #Use registered callbacks before element is cleaned for callback", "catching all exceptions within the callback itself. - func: a callable object taking", "#Update tag counter self._element_tags[name] += 1 #Update parent children (if any) if parent", "None else: parent = parent_tuple[0] except IndexError: parent = None #Exploit current stack", "start event. 
Note that return value of callback is ignored. Any exception raised", "#Exploit current stack to get ancestor ancestor = \".\".join([s[1] for s in self._stack])", "element attributes (as provided by start event) \"\"\" xml.sax.ContentHandler.__init__(self) #super not working here", "stack self._stack.append((identifier, name, attrs)) #Use registered callbacks for callback in self._start_callbacks: callback(self._stack, self._locator)", "for start event. Note that return value of callback is ignored. Any exception", "pass def startElement(self, name, attrs): \"\"\" Method invoked when starting to read an", "identifier = self._stack[-1][0] #Use registered callbacks before element is cleaned for callback in", "interface and is overloaded here. - name: tag of element being read -", "s in self._stack]) self._element_ancestors[name].add(ancestor) #Update tag counter self._element_tags[name] += 1 #Update parent children", "for end event. Note that return value of callback is ignored. Any exception", "The callbacks for end event will be called passing the following arguments: -", "will be called passing the following arguments: - element name - element children", "getTagsCount(self): \"\"\" Get a dictionnary with tags count. - return: dictionnary where keys", "ending to read an element in XML dataset. This method is part of", "def _requestUniqueIdentifier(self): \"\"\" Return a unique identifier used at parsing time. - return:", "\"\"\" return dict(self._element_tags) def getTagsAncestors(self): \"\"\" Get a dictionnary with tags ancestors. -", "children parent = None else: parent = parent_tuple[0] except IndexError: parent = None", "counter self._element_tags[name] += 1 #Update parent children (if any) if parent is not", "is overloaded here. - name: tag of element being read - attrs: element", "__enter__(self): \"\"\" Context manager entry point. 
\"\"\" self._id = 0 #unique identifier incremented", "osm element as it has too many children parent = None else: parent", "parent is not None: self._children[parent].append((name, attrs)) #Initialisation of own children self._children[identifier] = [", "- tags count - tags ancestors It is possible to register callback functions", "should take care of catching all exceptions within the callback itself. - func:", "= self._stack[-1] if parent_tuple[1] == 'osm': #We ignore osm element as it has", "used at parsing time. - return: identifier \"\"\" self._id += 1 return self._id", "While parsing the XML file, handler keeps trace of: - tags count -", "callbacks for callback in self._start_callbacks: callback(self._stack, self._locator) def endElement(self, name): \"\"\" Method invoked", "callback functions for start or end events. The callbacks for start event will", "the parser with 'on the fly' data quality audit or export. \"\"\" class", "- attrs: element attributes \"\"\" #Get identifier for current element identifier = self._requestUniqueIdentifier()", "to register callback functions for start or end events. The callbacks for start", "handler for parsing OpenStreetMap XML files. While parsing the XML file, handler keeps", "tuples - element unique identifier - element name (as provided by start event)", "The state of object keeps a trace of stack while parsing. This enables", "Get a dictionnary with tags ancestors. 
- return: dictionnary where keys are tags", "own children self._children[identifier] = [ ] #Update stack self._stack.append((identifier, name, attrs)) #Use registered", "unique identifier - element name (as provided by start event) - element attributes", "element is cleaned for callback in self._end_callbacks: callback(name, self._children[identifier], self._locator) #Cleaning identifier, name,", "= self._stack[-1][0] #Use registered callbacks before element is cleaned for callback in self._end_callbacks:", "passing the following arguments: - element name - element children - locator Return", "self._stack.pop(-1) del self._children[identifier] def getTagsCount(self): \"\"\" Get a dictionnary with tags count. -", "name, attrs = self._stack.pop(-1) del self._children[identifier] def getTagsCount(self): \"\"\" Get a dictionnary with", "invoked when ending to read an element in XML dataset. This method is", "import xml.sax from collections import Counter, defaultdict \"\"\" Custom handler for parsing OpenStreetMap", "elements being read return self def __exit__(self, *args): \"\"\" Context manager exit point.", "starting to read an element in XML dataset. This method is part of", "tags and values are a sequence of all different ancestors path \"\"\" return", "\"\"\" Remove all registered callbacks. \"\"\" self._end_callbacks = [ ] self._start_callbacks = [", "and is overloaded here. - name: tag of element being read \"\"\" #Get", "\"\"\" Custom handler for parsing OpenStreetMap XML files. While parsing the XML file,", "callable object taking stack and locator as arguments. \"\"\" self._start_callbacks.append(func) def registerEndEventCallback(self, func):", "a dictionnary with tags count. - return: dictionnary where keys are tags and", "of callbacks is ignored by the handler class. 
This enables to enhance the", "def endElement(self, name): \"\"\" Method invoked when ending to read an element in", "[ ] self._start_callbacks = [ ] def _requestUniqueIdentifier(self): \"\"\" Return a unique identifier", "\"\"\" Register a callback for start event. Note that return value of callback", "- element children - locator Return value of callbacks is ignored by the", "self._start_callbacks = [ ] #start event callbacks self._end_callbacks = [ ] #end event", "element attributes \"\"\" #Get identifier for current element identifier = self._requestUniqueIdentifier() #Has element", "dictionnary with tags count. - return: dictionnary where keys are tags and values", "start event) \"\"\" xml.sax.ContentHandler.__init__(self) #super not working here ??? def __enter__(self): \"\"\" Context", "xml.sax from collections import Counter, defaultdict \"\"\" Custom handler for parsing OpenStreetMap XML", "- locator Return value of callbacks is ignored by the handler class. This", "count \"\"\" return dict(self._element_tags) def getTagsAncestors(self): \"\"\" Get a dictionnary with tags ancestors.", "self._locator) def endElement(self, name): \"\"\" Method invoked when ending to read an element", "parsing. The _stack internal variable stores tuples - element unique identifier - element", "and values are a sequence of all different ancestors path \"\"\" return dict(self._element_ancestors)", "= 0 #unique identifier incremented at self._stack = [ ] #current stack of", "as it has too many children parent = None else: parent = parent_tuple[0]", "clearCallbacks(self): \"\"\" Remove all registered callbacks. 
\"\"\" self._end_callbacks = [ ] self._start_callbacks =", "element being read - attrs: element attributes \"\"\" #Get identifier for current element", "name (as provided by start event) - element attributes (as provided by start", "identifier, name, attrs = self._stack.pop(-1) del self._children[identifier] def getTagsCount(self): \"\"\" Get a dictionnary", "for start or end events. The callbacks for start event will be called", "of element tags self._element_ancestors = defaultdict(set) #collection of ancestors per tag self._start_callbacks =", "parent = None else: parent = parent_tuple[0] except IndexError: parent = None #Exploit", "values are count \"\"\" return dict(self._element_tags) def getTagsAncestors(self): \"\"\" Get a dictionnary with", "by start event) - element attributes (as provided by start event) \"\"\" xml.sax.ContentHandler.__init__(self)", "registerEndEventCallback(self, func): \"\"\" Register a callback for end event. Note that return value", "end events. The callbacks for start event will be called passing the following", "parent? If yes get the id. try: parent_tuple = self._stack[-1] if parent_tuple[1] ==", "is destroyed when end event occured. This enables to limit memory usage while", "stack while parsing. This enables to collect information from children. The stack is", "event. Note that return value of callback is ignored. Any exception raised by", "__init__(self): \"\"\" Constructor. This class is intended to be used as a context", "raised by callback is not catched by handler, so you should take care", "are tags and values are a sequence of all different ancestors path \"\"\"", "intended to be used as a context manager. The state of object keeps", "ignore osm element as it has too many children parent = None else:", "element children and locator as arguments. 
\"\"\" self._end_callbacks.append(func) def clearCallbacks(self): \"\"\" Remove all", "many children parent = None else: parent = parent_tuple[0] except IndexError: parent =", "cleaned for callback in self._end_callbacks: callback(name, self._children[identifier], self._locator) #Cleaning identifier, name, attrs =", "the fly' data quality audit or export. \"\"\" class OpenStreetMapXmlHandler(xml.sax.ContentHandler): def __init__(self): \"\"\"", "{ } #children elements of elements being read return self def __exit__(self, *args):", "arguments. \"\"\" self._end_callbacks.append(func) def clearCallbacks(self): \"\"\" Remove all registered callbacks. \"\"\" self._end_callbacks =", "stack and locator as arguments. \"\"\" self._start_callbacks.append(func) def registerEndEventCallback(self, func): \"\"\" Register a", "read an element in XML dataset. This method is part of of xml.sax.ContentHandler", "manager entry point. \"\"\" self._id = 0 #unique identifier incremented at self._stack =", "stack of element being read self._element_tags = Counter() #counter of element tags self._element_ancestors", "attrs)) #Initialisation of own children self._children[identifier] = [ ] #Update stack self._stack.append((identifier, name,", "state of object keeps a trace of stack while parsing. This enables to", "XML files. While parsing the XML file, handler keeps trace of: - tags", "identifier for current element identifier = self._requestUniqueIdentifier() #Has element a parent? If yes", "point. \"\"\" pass def startElement(self, name, attrs): \"\"\" Method invoked when starting to", "to enhance the parser with 'on the fly' data quality audit or export.", "for callback in self._start_callbacks: callback(self._stack, self._locator) def endElement(self, name): \"\"\" Method invoked when", "__exit__(self, *args): \"\"\" Context manager exit point. 
\"\"\" pass def startElement(self, name, attrs):", "#Get identifier for current element identifier = self._requestUniqueIdentifier() #Has element a parent? If", "read \"\"\" #Get identifier identifier = self._stack[-1][0] #Use registered callbacks before element is", "\"\"\" Get a dictionnary with tags count. - return: dictionnary where keys are", "Method invoked when ending to read an element in XML dataset. This method", "and is overloaded here. - name: tag of element being read - attrs:", "stack - locator The callbacks for end event will be called passing the", "self._end_callbacks.append(func) def clearCallbacks(self): \"\"\" Remove all registered callbacks. \"\"\" self._end_callbacks = [ ]", "to limit memory usage while parsing. The _stack internal variable stores tuples -", "#Update parent children (if any) if parent is not None: self._children[parent].append((name, attrs)) #Initialisation", "(as provided by start event) \"\"\" xml.sax.ContentHandler.__init__(self) #super not working here ??? def", "values are a sequence of all different ancestors path \"\"\" return dict(self._element_ancestors) def", "_requestUniqueIdentifier(self): \"\"\" Return a unique identifier used at parsing time. - return: identifier", "= [ ] #start event callbacks self._end_callbacks = [ ] #end event callbacks", "that return value of callback is ignored. Any exception raised by callback is", "identifier - element name (as provided by start event) - element attributes (as", "dict(self._element_tags) def getTagsAncestors(self): \"\"\" Get a dictionnary with tags ancestors. - return: dictionnary", "def __exit__(self, *args): \"\"\" Context manager exit point. \"\"\" pass def startElement(self, name,", "current element identifier = self._requestUniqueIdentifier() #Has element a parent? If yes get the", "when ending to read an element in XML dataset. 
This method is part", "tag counter self._element_tags[name] += 1 #Update parent children (if any) if parent is", "the following arguments: - stack - locator The callbacks for end event will", "here ??? def __enter__(self): \"\"\" Context manager entry point. \"\"\" self._id = 0", "class. This enables to enhance the parser with 'on the fly' data quality", "registered callbacks. \"\"\" self._end_callbacks = [ ] self._start_callbacks = [ ] def _requestUniqueIdentifier(self):", "self._end_callbacks = [ ] self._start_callbacks = [ ] def _requestUniqueIdentifier(self): \"\"\" Return a", "care of catching all exceptions within the callback itself. - func: a callable", "Context manager exit point. \"\"\" pass def startElement(self, name, attrs): \"\"\" Method invoked", "overloaded here. - name: tag of element being read - attrs: element attributes", "point. \"\"\" self._id = 0 #unique identifier incremented at self._stack = [ ]", "element children - locator Return value of callbacks is ignored by the handler", "part of of xml.sax.ContentHandler interface and is overloaded here. - name: tag of", "- return: dictionnary where keys are tags and values are a sequence of", "= [ ] self._start_callbacks = [ ] def _requestUniqueIdentifier(self): \"\"\" Return a unique", "event will be called passing the following arguments: - element name - element", "- element attributes (as provided by start event) \"\"\" xml.sax.ContentHandler.__init__(self) #super not working", "if parent is not None: self._children[parent].append((name, attrs)) #Initialisation of own children self._children[identifier] =", "of callback is ignored. Any exception raised by callback is not catched by", "a dictionnary with tags ancestors. - return: dictionnary where keys are tags and", "memory usage while parsing. The _stack internal variable stores tuples - element unique", "func): \"\"\" Register a callback for end event. Note that return value of", "Context manager entry point. 
\"\"\" self._id = 0 #unique identifier incremented at self._stack", "is possible to register callback functions for start or end events. The callbacks", "following arguments: - stack - locator The callbacks for end event will be", "locator as arguments. \"\"\" self._start_callbacks.append(func) def registerEndEventCallback(self, func): \"\"\" Register a callback for", "name): \"\"\" Method invoked when ending to read an element in XML dataset.", "in XML dataset. This method is part of of xml.sax.ContentHandler interface and is", "ignored by the handler class. This enables to enhance the parser with 'on", "of of xml.sax.ContentHandler interface and is overloaded here. - name: tag of element", "#Update stack self._stack.append((identifier, name, attrs)) #Use registered callbacks for callback in self._start_callbacks: callback(self._stack,", "- tags ancestors It is possible to register callback functions for start or", "start event) - element attributes (as provided by start event) \"\"\" xml.sax.ContentHandler.__init__(self) #super", "locator as arguments. \"\"\" self._end_callbacks.append(func) def clearCallbacks(self): \"\"\" Remove all registered callbacks. \"\"\"", "#Use registered callbacks for callback in self._start_callbacks: callback(self._stack, self._locator) def endElement(self, name): \"\"\"", "not working here ??? def __enter__(self): \"\"\" Context manager entry point. \"\"\" self._id", "limit memory usage while parsing. The _stack internal variable stores tuples - element", "of own children self._children[identifier] = [ ] #Update stack self._stack.append((identifier, name, attrs)) #Use", "- element name (as provided by start event) - element attributes (as provided", "and locator as arguments. 
\"\"\" self._end_callbacks.append(func) def clearCallbacks(self): \"\"\" Remove all registered callbacks.", "ancestors per tag self._start_callbacks = [ ] #start event callbacks self._end_callbacks = [", "registerStartEventCallback(self, func): \"\"\" Register a callback for start event. Note that return value", "to be used as a context manager. The state of object keeps a", "(if any) if parent is not None: self._children[parent].append((name, attrs)) #Initialisation of own children", "= \".\".join([s[1] for s in self._stack]) self._element_ancestors[name].add(ancestor) #Update tag counter self._element_tags[name] += 1", "is not None: self._children[parent].append((name, attrs)) #Initialisation of own children self._children[identifier] = [ ]", "callbacks is ignored by the handler class. This enables to enhance the parser", "or end events. The callbacks for start event will be called passing the", "[ ] #current stack of element being read self._element_tags = Counter() #counter of", "0 #unique identifier incremented at self._stack = [ ] #current stack of element", "parser with 'on the fly' data quality audit or export. \"\"\" class OpenStreetMapXmlHandler(xml.sax.ContentHandler):", "tags and values are count \"\"\" return dict(self._element_tags) def getTagsAncestors(self): \"\"\" Get a", "self._element_ancestors = defaultdict(set) #collection of ancestors per tag self._start_callbacks = [ ] #start", "for callback in self._end_callbacks: callback(name, self._children[identifier], self._locator) #Cleaning identifier, name, attrs = self._stack.pop(-1)", "ancestors It is possible to register callback functions for start or end events.", "] #current stack of element being read self._element_tags = Counter() #counter of element", "object taking element name, element children and locator as arguments. 
\"\"\" self._end_callbacks.append(func) def", "in self._stack]) self._element_ancestors[name].add(ancestor) #Update tag counter self._element_tags[name] += 1 #Update parent children (if", "the handler class. This enables to enhance the parser with 'on the fly'", "invoked when starting to read an element in XML dataset. This method is", "callback is not catched by handler, so you should take care of catching", "exit point. \"\"\" pass def startElement(self, name, attrs): \"\"\" Method invoked when starting", "from collections import Counter, defaultdict \"\"\" Custom handler for parsing OpenStreetMap XML files.", "children (if any) if parent is not None: self._children[parent].append((name, attrs)) #Initialisation of own", "1 #Update parent children (if any) if parent is not None: self._children[parent].append((name, attrs))", "all exceptions within the callback itself. - func: a callable object taking element", "None #Exploit current stack to get ancestor ancestor = \".\".join([s[1] for s in", "with tags ancestors. - return: dictionnary where keys are tags and values are", "element being read \"\"\" #Get identifier identifier = self._stack[-1][0] #Use registered callbacks before", "self._stack.append((identifier, name, attrs)) #Use registered callbacks for callback in self._start_callbacks: callback(self._stack, self._locator) def", "element unique identifier - element name (as provided by start event) - element", "you should take care of catching all exceptions within the callback itself. -", "#end event callbacks self._children = { } #children elements of elements being read", "files. While parsing the XML file, handler keeps trace of: - tags count", "def __enter__(self): \"\"\" Context manager entry point. \"\"\" self._id = 0 #unique identifier", "This class is intended to be used as a context manager. The state", "id. 
try: parent_tuple = self._stack[-1] if parent_tuple[1] == 'osm': #We ignore osm element", "so you should take care of catching all exceptions within the callback itself.", "an element in XML dataset. This method is part of of xml.sax.ContentHandler interface", "path \"\"\" return dict(self._element_ancestors) def registerStartEventCallback(self, func): \"\"\" Register a callback for start", "working here ??? def __enter__(self): \"\"\" Context manager entry point. \"\"\" self._id =", "self._stack[-1] if parent_tuple[1] == 'osm': #We ignore osm element as it has too", "self def __exit__(self, *args): \"\"\" Context manager exit point. \"\"\" pass def startElement(self,", "if parent_tuple[1] == 'osm': #We ignore osm element as it has too many", "the following arguments: - element name - element children - locator Return value", "is cleaned for callback in self._end_callbacks: callback(name, self._children[identifier], self._locator) #Cleaning identifier, name, attrs", "fly' data quality audit or export. \"\"\" class OpenStreetMapXmlHandler(xml.sax.ContentHandler): def __init__(self): \"\"\" Constructor.", "attrs = self._stack.pop(-1) del self._children[identifier] def getTagsCount(self): \"\"\" Get a dictionnary with tags", "Return value of callbacks is ignored by the handler class. This enables to", "read self._element_tags = Counter() #counter of element tags self._element_ancestors = defaultdict(set) #collection of", "trace of stack while parsing. This enables to collect information from children. The", "self._children[identifier] = [ ] #Update stack self._stack.append((identifier, name, attrs)) #Use registered callbacks for", "tag self._start_callbacks = [ ] #start event callbacks self._end_callbacks = [ ] #end", "taking element name, element children and locator as arguments. \"\"\" self._end_callbacks.append(func) def clearCallbacks(self):" ]
[ "SPDX-License-Identifier: Apache-2.0 from subprocess import call, STDOUT from shutil import copyfile import sys", "pass def replace_template_var(template_file, var, value): with fileinput.FileInput(template_file, inplace=True, backup='.bak') as file: for line", "rights reserved. # SPDX-License-Identifier: Apache-2.0 from subprocess import call, STDOUT from shutil import", "import call, STDOUT from shutil import copyfile import sys import os import fileinput", "os import fileinput ORIGINAL_FIX_VERSION_HS = \"gen-source/Version.hs.template\" GENERATED_VERSION_HS = \"DA/Sdk/Cli/Version.hs\" ORIGINAL_MOCKSERVER_HS = \"gen-source/Mockserver.hs.template\" GENERATED_MOCKSERVER_HS", "= \"%s/%s\" % (basedir, GENERATED_VERSION_HS) print(\"Generating %s...\" % GENERATED_VERSION_HS) copyfile(\"%s/%s\" % (basedir, ORIGINAL_FIX_VERSION_HS),", "print(\"Running stack build...\") call([\"stack\", \"build\"], stderr=sys.stderr, stdout=sys.stdout) finally: try: print(\"Removing generated files...\") os.remove(GENERATED_VERSION_HS)", "finally: try: print(\"Removing generated files...\") os.remove(GENERATED_VERSION_HS) os.remove(GENERATED_MOCKSERVER_HS) except OSError: pass def replace_template_var(template_file, var,", "def replace_template_var(template_file, var, value): with fileinput.FileInput(template_file, inplace=True, backup='.bak') as file: for line in", "basedir = os.path.dirname(os.path.realpath(__file__)) gen_vsn_hs = \"%s/%s\" % (basedir, GENERATED_VERSION_HS) print(\"Generating %s...\" % GENERATED_VERSION_HS)", "gen_vsn_hs) replace_template_var(gen_vsn_hs, \"<VERSION-VAR>\", version) print(\"Generating %s...\" % GENERATED_MOCKSERVER_HS) copyfile(\"%s/%s\" % (basedir, ORIGINAL_MOCKSERVER_HS), \"%s/%s\"", "files...\") os.remove(GENERATED_VERSION_HS) os.remove(GENERATED_MOCKSERVER_HS) except OSError: pass def replace_template_var(template_file, var, value): with fileinput.FileInput(template_file, inplace=True,", "\"Mockserver.hs\" def main(version=None): if version is 
None: version = \"HEAD\" print(\"Stack Builder started...\")", "OSError: pass def replace_template_var(template_file, var, value): with fileinput.FileInput(template_file, inplace=True, backup='.bak') as file: for", "generated files...\") os.remove(GENERATED_VERSION_HS) os.remove(GENERATED_MOCKSERVER_HS) except OSError: pass def replace_template_var(template_file, var, value): with fileinput.FileInput(template_file,", "\"DA/Sdk/Cli/Version.hs\" ORIGINAL_MOCKSERVER_HS = \"gen-source/Mockserver.hs.template\" GENERATED_MOCKSERVER_HS = \"Mockserver.hs\" def main(version=None): if version is None:", "print(\"Removing generated files...\") os.remove(GENERATED_VERSION_HS) os.remove(GENERATED_MOCKSERVER_HS) except OSError: pass def replace_template_var(template_file, var, value): with", "ORIGINAL_MOCKSERVER_HS = \"gen-source/Mockserver.hs.template\" GENERATED_MOCKSERVER_HS = \"Mockserver.hs\" def main(version=None): if version is None: version", "gen_vsn_hs = \"%s/%s\" % (basedir, GENERATED_VERSION_HS) print(\"Generating %s...\" % GENERATED_VERSION_HS) copyfile(\"%s/%s\" % (basedir,", "copyfile(\"%s/%s\" % (basedir, ORIGINAL_FIX_VERSION_HS), gen_vsn_hs) replace_template_var(gen_vsn_hs, \"<VERSION-VAR>\", version) print(\"Generating %s...\" % GENERATED_MOCKSERVER_HS) copyfile(\"%s/%s\"", "and/or its affiliates. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 from subprocess import call,", "Builder started...\") try: basedir = os.path.dirname(os.path.realpath(__file__)) gen_vsn_hs = \"%s/%s\" % (basedir, GENERATED_VERSION_HS) print(\"Generating", "replace_template_var(template_file, var, value): with fileinput.FileInput(template_file, inplace=True, backup='.bak') as file: for line in file:", "\"<VERSION-VAR>\", version) print(\"Generating %s...\" % GENERATED_MOCKSERVER_HS) copyfile(\"%s/%s\" % (basedir, ORIGINAL_MOCKSERVER_HS), \"%s/%s\" % (basedir,", "shutil import copyfile import sys import os import fileinput ORIGINAL_FIX_VERSION_HS = \"gen-source/Version.hs.template\" GENERATED_VERSION_HS", "line in file: print(line.replace(var, value), end='') if __name__== \"__main__\": if len(sys.argv) > 1:", "Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0", "import sys import os import fileinput ORIGINAL_FIX_VERSION_HS = \"gen-source/Version.hs.template\" GENERATED_VERSION_HS = \"DA/Sdk/Cli/Version.hs\" ORIGINAL_MOCKSERVER_HS", "copyfile import sys import os import fileinput ORIGINAL_FIX_VERSION_HS = \"gen-source/Version.hs.template\" GENERATED_VERSION_HS = \"DA/Sdk/Cli/Version.hs\"", "os.remove(GENERATED_VERSION_HS) os.remove(GENERATED_MOCKSERVER_HS) except OSError: pass def replace_template_var(template_file, var, value): with fileinput.FileInput(template_file, inplace=True, backup='.bak')", "os.remove(GENERATED_MOCKSERVER_HS) except OSError: pass def replace_template_var(template_file, var, value): with fileinput.FileInput(template_file, inplace=True, backup='.bak') as", "__name__== \"__main__\": if len(sys.argv) > 1: version = sys.argv[1] else: version = None", "2019 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
# SPDX-License-Identifier:", "GENERATED_VERSION_HS = \"DA/Sdk/Cli/Version.hs\" ORIGINAL_MOCKSERVER_HS = \"gen-source/Mockserver.hs.template\" GENERATED_MOCKSERVER_HS = \"Mockserver.hs\" def main(version=None): if version", "\"HEAD\" print(\"Stack Builder started...\") try: basedir = os.path.dirname(os.path.realpath(__file__)) gen_vsn_hs = \"%s/%s\" % (basedir,", "with fileinput.FileInput(template_file, inplace=True, backup='.bak') as file: for line in file: print(line.replace(var, value), end='')", "\"gen-source/Version.hs.template\" GENERATED_VERSION_HS = \"DA/Sdk/Cli/Version.hs\" ORIGINAL_MOCKSERVER_HS = \"gen-source/Mockserver.hs.template\" GENERATED_MOCKSERVER_HS = \"Mockserver.hs\" def main(version=None): if", "file: print(line.replace(var, value), end='') if __name__== \"__main__\": if len(sys.argv) > 1: version =", "file: for line in file: print(line.replace(var, value), end='') if __name__== \"__main__\": if len(sys.argv)", "version = \"HEAD\" print(\"Stack Builder started...\") try: basedir = os.path.dirname(os.path.realpath(__file__)) gen_vsn_hs = \"%s/%s\"", "fileinput.FileInput(template_file, inplace=True, backup='.bak') as file: for line in file: print(line.replace(var, value), end='') if", "as file: for line in file: print(line.replace(var, value), end='') if __name__== \"__main__\": if", "# SPDX-License-Identifier: Apache-2.0 from subprocess import call, STDOUT from shutil import copyfile import", "= \"gen-source/Version.hs.template\" GENERATED_VERSION_HS = \"DA/Sdk/Cli/Version.hs\" ORIGINAL_MOCKSERVER_HS = \"gen-source/Mockserver.hs.template\" GENERATED_MOCKSERVER_HS = \"Mockserver.hs\" def main(version=None):", "STDOUT from shutil import copyfile import sys import os import fileinput ORIGINAL_FIX_VERSION_HS =", "subprocess import call, STDOUT from shutil import copyfile import sys import os import", "% (basedir, GENERATED_MOCKSERVER_HS)) print(\"Running stack build...\") call([\"stack\", \"build\"], stderr=sys.stderr, stdout=sys.stdout) finally: 
try: print(\"Removing", "ORIGINAL_FIX_VERSION_HS), gen_vsn_hs) replace_template_var(gen_vsn_hs, \"<VERSION-VAR>\", version) print(\"Generating %s...\" % GENERATED_MOCKSERVER_HS) copyfile(\"%s/%s\" % (basedir, ORIGINAL_MOCKSERVER_HS),", "GENERATED_MOCKSERVER_HS = \"Mockserver.hs\" def main(version=None): if version is None: version = \"HEAD\" print(\"Stack", "call, STDOUT from shutil import copyfile import sys import os import fileinput ORIGINAL_FIX_VERSION_HS", "(basedir, ORIGINAL_MOCKSERVER_HS), \"%s/%s\" % (basedir, GENERATED_MOCKSERVER_HS)) print(\"Running stack build...\") call([\"stack\", \"build\"], stderr=sys.stderr, stdout=sys.stdout)", "ORIGINAL_FIX_VERSION_HS = \"gen-source/Version.hs.template\" GENERATED_VERSION_HS = \"DA/Sdk/Cli/Version.hs\" ORIGINAL_MOCKSERVER_HS = \"gen-source/Mockserver.hs.template\" GENERATED_MOCKSERVER_HS = \"Mockserver.hs\" def", "(basedir, GENERATED_VERSION_HS) print(\"Generating %s...\" % GENERATED_VERSION_HS) copyfile(\"%s/%s\" % (basedir, ORIGINAL_FIX_VERSION_HS), gen_vsn_hs) replace_template_var(gen_vsn_hs, \"<VERSION-VAR>\",", "version) print(\"Generating %s...\" % GENERATED_MOCKSERVER_HS) copyfile(\"%s/%s\" % (basedir, ORIGINAL_MOCKSERVER_HS), \"%s/%s\" % (basedir, GENERATED_MOCKSERVER_HS))", "def main(version=None): if version is None: version = \"HEAD\" print(\"Stack Builder started...\") try:", "%s...\" % GENERATED_VERSION_HS) copyfile(\"%s/%s\" % (basedir, ORIGINAL_FIX_VERSION_HS), gen_vsn_hs) replace_template_var(gen_vsn_hs, \"<VERSION-VAR>\", version) print(\"Generating %s...\"", "call([\"stack\", \"build\"], stderr=sys.stderr, stdout=sys.stdout) finally: try: print(\"Removing generated files...\") os.remove(GENERATED_VERSION_HS) os.remove(GENERATED_MOCKSERVER_HS) except OSError:", "Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 from", "All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 from subprocess import call, STDOUT from shutil", "fileinput ORIGINAL_FIX_VERSION_HS = \"gen-source/Version.hs.template\" GENERATED_VERSION_HS = \"DA/Sdk/Cli/Version.hs\" ORIGINAL_MOCKSERVER_HS = \"gen-source/Mockserver.hs.template\" GENERATED_MOCKSERVER_HS = \"Mockserver.hs\"", "build...\") call([\"stack\", \"build\"], stderr=sys.stderr, stdout=sys.stdout) finally: try: print(\"Removing generated files...\") os.remove(GENERATED_VERSION_HS) os.remove(GENERATED_MOCKSERVER_HS) except", "% GENERATED_MOCKSERVER_HS) copyfile(\"%s/%s\" % (basedir, ORIGINAL_MOCKSERVER_HS), \"%s/%s\" % (basedir, GENERATED_MOCKSERVER_HS)) print(\"Running stack build...\")", "GENERATED_VERSION_HS) copyfile(\"%s/%s\" % (basedir, ORIGINAL_FIX_VERSION_HS), gen_vsn_hs) replace_template_var(gen_vsn_hs, \"<VERSION-VAR>\", version) print(\"Generating %s...\" % GENERATED_MOCKSERVER_HS)", "GENERATED_MOCKSERVER_HS)) print(\"Running stack build...\") call([\"stack\", \"build\"], stderr=sys.stderr, stdout=sys.stdout) finally: try: print(\"Removing generated files...\")", "GENERATED_VERSION_HS) print(\"Generating %s...\" % GENERATED_VERSION_HS) copyfile(\"%s/%s\" % (basedir, ORIGINAL_FIX_VERSION_HS), gen_vsn_hs) replace_template_var(gen_vsn_hs, \"<VERSION-VAR>\", version)", "print(line.replace(var, value), end='') if __name__== \"__main__\": if len(sys.argv) > 1: version = sys.argv[1]", "inplace=True, backup='.bak') as file: for line in file: print(line.replace(var, value), end='') if __name__==", "var, value): with fileinput.FileInput(template_file, inplace=True, backup='.bak') as file: for line in file: print(line.replace(var,", "version is None: version = \"HEAD\" print(\"Stack Builder started...\") try: basedir = os.path.dirname(os.path.realpath(__file__))", "= \"DA/Sdk/Cli/Version.hs\" ORIGINAL_MOCKSERVER_HS = \"gen-source/Mockserver.hs.template\" GENERATED_MOCKSERVER_HS = \"Mockserver.hs\" def main(version=None): if version is", "% GENERATED_VERSION_HS) 
copyfile(\"%s/%s\" % (basedir, ORIGINAL_FIX_VERSION_HS), gen_vsn_hs) replace_template_var(gen_vsn_hs, \"<VERSION-VAR>\", version) print(\"Generating %s...\" %", "= \"gen-source/Mockserver.hs.template\" GENERATED_MOCKSERVER_HS = \"Mockserver.hs\" def main(version=None): if version is None: version =", "for line in file: print(line.replace(var, value), end='') if __name__== \"__main__\": if len(sys.argv) >", "try: basedir = os.path.dirname(os.path.realpath(__file__)) gen_vsn_hs = \"%s/%s\" % (basedir, GENERATED_VERSION_HS) print(\"Generating %s...\" %", "\"gen-source/Mockserver.hs.template\" GENERATED_MOCKSERVER_HS = \"Mockserver.hs\" def main(version=None): if version is None: version = \"HEAD\"", "= \"HEAD\" print(\"Stack Builder started...\") try: basedir = os.path.dirname(os.path.realpath(__file__)) gen_vsn_hs = \"%s/%s\" %", "\"build\"], stderr=sys.stderr, stdout=sys.stdout) finally: try: print(\"Removing generated files...\") os.remove(GENERATED_VERSION_HS) os.remove(GENERATED_MOCKSERVER_HS) except OSError: pass", "in file: print(line.replace(var, value), end='') if __name__== \"__main__\": if len(sys.argv) > 1: version", "sys import os import fileinput ORIGINAL_FIX_VERSION_HS = \"gen-source/Version.hs.template\" GENERATED_VERSION_HS = \"DA/Sdk/Cli/Version.hs\" ORIGINAL_MOCKSERVER_HS =", "#!/usr/bin/env python3 # Copyright (c) 2019 Digital Asset (Switzerland) GmbH and/or its affiliates.", "from shutil import copyfile import sys import os import fileinput ORIGINAL_FIX_VERSION_HS = \"gen-source/Version.hs.template\"", "import fileinput ORIGINAL_FIX_VERSION_HS = \"gen-source/Version.hs.template\" GENERATED_VERSION_HS = \"DA/Sdk/Cli/Version.hs\" ORIGINAL_MOCKSERVER_HS = \"gen-source/Mockserver.hs.template\" GENERATED_MOCKSERVER_HS =", "# Copyright (c) 2019 Digital Asset (Switzerland) GmbH and/or its affiliates. 
All rights", "is None: version = \"HEAD\" print(\"Stack Builder started...\") try: basedir = os.path.dirname(os.path.realpath(__file__)) gen_vsn_hs", "\"%s/%s\" % (basedir, GENERATED_MOCKSERVER_HS)) print(\"Running stack build...\") call([\"stack\", \"build\"], stderr=sys.stderr, stdout=sys.stdout) finally: try:", "(basedir, GENERATED_MOCKSERVER_HS)) print(\"Running stack build...\") call([\"stack\", \"build\"], stderr=sys.stderr, stdout=sys.stdout) finally: try: print(\"Removing generated", "if __name__== \"__main__\": if len(sys.argv) > 1: version = sys.argv[1] else: version =", "stdout=sys.stdout) finally: try: print(\"Removing generated files...\") os.remove(GENERATED_VERSION_HS) os.remove(GENERATED_MOCKSERVER_HS) except OSError: pass def replace_template_var(template_file,", "(c) 2019 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. #", "print(\"Generating %s...\" % GENERATED_VERSION_HS) copyfile(\"%s/%s\" % (basedir, ORIGINAL_FIX_VERSION_HS), gen_vsn_hs) replace_template_var(gen_vsn_hs, \"<VERSION-VAR>\", version) print(\"Generating", "os.path.dirname(os.path.realpath(__file__)) gen_vsn_hs = \"%s/%s\" % (basedir, GENERATED_VERSION_HS) print(\"Generating %s...\" % GENERATED_VERSION_HS) copyfile(\"%s/%s\" %", "value): with fileinput.FileInput(template_file, inplace=True, backup='.bak') as file: for line in file: print(line.replace(var, value),", "started...\") try: basedir = os.path.dirname(os.path.realpath(__file__)) gen_vsn_hs = \"%s/%s\" % (basedir, GENERATED_VERSION_HS) print(\"Generating %s...\"", "Apache-2.0 from subprocess import call, STDOUT from shutil import copyfile import sys import", "its affiliates. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 from subprocess import call, STDOUT", "import os import fileinput ORIGINAL_FIX_VERSION_HS = \"gen-source/Version.hs.template\" GENERATED_VERSION_HS = \"DA/Sdk/Cli/Version.hs\" ORIGINAL_MOCKSERVER_HS = \"gen-source/Mockserver.hs.template\"", "stack build...\") call([\"stack\", \"build\"], stderr=sys.stderr, stdout=sys.stdout) finally: try: print(\"Removing generated files...\") os.remove(GENERATED_VERSION_HS) os.remove(GENERATED_MOCKSERVER_HS)", "main(version=None): if version is None: version = \"HEAD\" print(\"Stack Builder started...\") try: basedir", "= os.path.dirname(os.path.realpath(__file__)) gen_vsn_hs = \"%s/%s\" % (basedir, GENERATED_VERSION_HS) print(\"Generating %s...\" % GENERATED_VERSION_HS) copyfile(\"%s/%s\"", "reserved. # SPDX-License-Identifier: Apache-2.0 from subprocess import call, STDOUT from shutil import copyfile", "= \"Mockserver.hs\" def main(version=None): if version is None: version = \"HEAD\" print(\"Stack Builder", "print(\"Stack Builder started...\") try: basedir = os.path.dirname(os.path.realpath(__file__)) gen_vsn_hs = \"%s/%s\" % (basedir, GENERATED_VERSION_HS)", "% (basedir, ORIGINAL_MOCKSERVER_HS), \"%s/%s\" % (basedir, GENERATED_MOCKSERVER_HS)) print(\"Running stack build...\") call([\"stack\", \"build\"], stderr=sys.stderr,", "end='') if __name__== \"__main__\": if len(sys.argv) > 1: version = sys.argv[1] else: version", "from subprocess import call, STDOUT from shutil import copyfile import sys import os", "import copyfile import sys import os import fileinput ORIGINAL_FIX_VERSION_HS = \"gen-source/Version.hs.template\" GENERATED_VERSION_HS =", "% (basedir, GENERATED_VERSION_HS) print(\"Generating %s...\" % GENERATED_VERSION_HS) copyfile(\"%s/%s\" % (basedir, ORIGINAL_FIX_VERSION_HS), gen_vsn_hs) replace_template_var(gen_vsn_hs,", "(Switzerland) GmbH and/or its affiliates. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 from subprocess", "%s...\" % GENERATED_MOCKSERVER_HS) copyfile(\"%s/%s\" % (basedir, ORIGINAL_MOCKSERVER_HS), \"%s/%s\" % (basedir, GENERATED_MOCKSERVER_HS)) print(\"Running stack", "% (basedir, ORIGINAL_FIX_VERSION_HS), gen_vsn_hs) replace_template_var(gen_vsn_hs, \"<VERSION-VAR>\", version) print(\"Generating %s...\" % GENERATED_MOCKSERVER_HS) copyfile(\"%s/%s\" %", "stderr=sys.stderr, stdout=sys.stdout) finally: try: print(\"Removing generated files...\") os.remove(GENERATED_VERSION_HS) os.remove(GENERATED_MOCKSERVER_HS) except OSError: pass def", "try: print(\"Removing generated files...\") os.remove(GENERATED_VERSION_HS) os.remove(GENERATED_MOCKSERVER_HS) except OSError: pass def replace_template_var(template_file, var, value):", "value), end='') if __name__== \"__main__\": if len(sys.argv) > 1: version = sys.argv[1] else:", "except OSError: pass def replace_template_var(template_file, var, value): with fileinput.FileInput(template_file, inplace=True, backup='.bak') as file:", "\"%s/%s\" % (basedir, GENERATED_VERSION_HS) print(\"Generating %s...\" % GENERATED_VERSION_HS) copyfile(\"%s/%s\" % (basedir, ORIGINAL_FIX_VERSION_HS), gen_vsn_hs)", "replace_template_var(gen_vsn_hs, \"<VERSION-VAR>\", version) print(\"Generating %s...\" % GENERATED_MOCKSERVER_HS) copyfile(\"%s/%s\" % (basedir, ORIGINAL_MOCKSERVER_HS), \"%s/%s\" %", "affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 from subprocess import call, STDOUT from", "Copyright (c) 2019 Digital Asset (Switzerland) GmbH and/or its affiliates. 
All rights reserved.", "None: version = \"HEAD\" print(\"Stack Builder started...\") try: basedir = os.path.dirname(os.path.realpath(__file__)) gen_vsn_hs =", "GENERATED_MOCKSERVER_HS) copyfile(\"%s/%s\" % (basedir, ORIGINAL_MOCKSERVER_HS), \"%s/%s\" % (basedir, GENERATED_MOCKSERVER_HS)) print(\"Running stack build...\") call([\"stack\",", "python3 # Copyright (c) 2019 Digital Asset (Switzerland) GmbH and/or its affiliates. All", "GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 from subprocess import", "\"__main__\": if len(sys.argv) > 1: version = sys.argv[1] else: version = None main(version)", "ORIGINAL_MOCKSERVER_HS), \"%s/%s\" % (basedir, GENERATED_MOCKSERVER_HS)) print(\"Running stack build...\") call([\"stack\", \"build\"], stderr=sys.stderr, stdout=sys.stdout) finally:", "print(\"Generating %s...\" % GENERATED_MOCKSERVER_HS) copyfile(\"%s/%s\" % (basedir, ORIGINAL_MOCKSERVER_HS), \"%s/%s\" % (basedir, GENERATED_MOCKSERVER_HS)) print(\"Running", "backup='.bak') as file: for line in file: print(line.replace(var, value), end='') if __name__== \"__main__\":", "if version is None: version = \"HEAD\" print(\"Stack Builder started...\") try: basedir =", "copyfile(\"%s/%s\" % (basedir, ORIGINAL_MOCKSERVER_HS), \"%s/%s\" % (basedir, GENERATED_MOCKSERVER_HS)) print(\"Running stack build...\") call([\"stack\", \"build\"],", "(basedir, ORIGINAL_FIX_VERSION_HS), gen_vsn_hs) replace_template_var(gen_vsn_hs, \"<VERSION-VAR>\", version) print(\"Generating %s...\" % GENERATED_MOCKSERVER_HS) copyfile(\"%s/%s\" % (basedir," ]
[ "plasmasystem self.eaffinity = 4.05*const.elementary_charge def __call__(self, zcharge, radius): return self.ptunnel(zcharge, radius) def rt_affinity(self,", "np import scipy.constants as const from scipy.integrate import solve_ivp PI = const.pi KE", "0.0, 0.0, tunnel_gain, energy_gain, energy_loss]) nano_qdens = np.sum(npdensity*grid_data.qpivots) pchem.nano_qdens = nano_qdens nano_qdens_rate =", "KE*qcharge/radius def particle_potenergy(radius, zcharge): \"\"\" Nanoparticle potential energy \"\"\" return -(KE*zcharge*QE**2)/radius def tunnel(rtaff,", "radius): return self.ptunnel(zcharge, radius) def rt_affinity(self, radius, zcharge): \"\"\" Computes rt_affinity to particle", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "\"\"\" Compute frequencies \"\"\" self.coll.compute_collisionfreq(energy, edensity, idensity, self.efreq, self.ifreq, self.tfreq) def compute_plasmacharging(time, delta_t,", "Compute frequencies \"\"\" self.coll.compute_collisionfreq(energy, edensity, idensity, self.efreq, self.ifreq, self.tfreq) def compute_plasmacharging(time, delta_t, grid_data,", "pchem.density_sourcedrain = np.array([electron_loss, ion_loss, 0.0, 0.0, tunnel_gain, energy_gain, energy_loss]) nano_qdens = np.sum(npdensity*grid_data.qpivots) pchem.nano_qdens", "\"\"\" Computes electron tunnel frequency \"\"\" def __init__(self, plasmasystem): self.psys = plasmasystem self.eaffinity", "from scipy.integrate import solve_ivp PI = const.pi KE = 1.0/(4.0*PI*const.epsilon_0) INVKE = 1.0/KE", "(ifreqfactor * rmesh2[:, gdata.qpivots > 0] * np.exp(-QE*phid[:, gdata.qpivots > 0]/kti)) for i,", "nel = pchem.past_plasmadensity[0] nar = pchem.past_plasmadensity[1] npdensity = growth_data.next_density ion_loss = np.sum(npdensity*charging.ifreq)/nar electron_loss", "self.psys.ion_temperature ion_energy = (ion_energy_from_temperature + 
0.5*self.psys.armass*self.ion_velocity*self.ion_velocity) kti = (2.0/3.0)*ion_energy ifreqfactor = 4.0 *", "gdata.qpivots <= 0] = (ifreqfactor * rmesh2[:, gdata.qpivots <= 0] * (1.0 -", "self.eaffinity ainf = ainfinity * INVKE/QE**2 rtaff = zcharge/(zcharge/radius + ainf - (5.0/(8.0*radius)))", "= TunnelFrequency(self.psys) self.rmesh, self.qmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots*QE, indexing='ij') self.rmesh, self.zmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots,", "collision frequencies \"\"\" def __init__(self, plasmasystem, grid_data): self.psys = plasmasystem self.gdata = grid_data", "\"\"\" with_tunnel = plasma_sys.with_tunnel nel = pchem.past_plasmadensity[0] nar = pchem.past_plasmadensity[1] npdensity = growth_data.next_density", "Tunnel frequency \"\"\" kte = (2.0/3.0)*energy*QE efreqfactor = 4.0 * PI * edensity", "prefac1 = -2./const.hbar prefac2 = np.sqrt(2.*ME*particle_potenergy(rtaff, zcharge)) return np.exp(prefac1*prefac2*(rtaff*np.arccos(np.sqrt(radius/rtaff)) -np.sqrt(radius*(rtaff-radius)))) class TunnelFrequency: \"\"\"", "* (1.0 + QE*phid[:, gdata.qpivots >= 0]/kte)) ifreq[:, gdata.qpivots <= 0] = (ifreqfactor", "\"\"\" def __init__(self, plasmasystem): self.psys = plasmasystem self.eaffinity = 4.05*const.elementary_charge def __call__(self, zcharge,", "= np.sum(npdensity*grid_data.qpivots) pchem.nano_qdens = nano_qdens nano_qdens_rate = np.sum(growth_data.qrate2d*grid_data.qpivots) pchem.nano_qdens_rate = nano_qdens_rate plasma_sys =", "const.elementary_charge ME = const.electron_mass def coulomb_floatpotential(qcharge, radius): \"\"\" Floating potential \"\"\" return KE*qcharge/radius", "this file except in compliance with the License. 
# You may obtain a", "np.sum(growth_data.qrate2d*grid_data.qpivots) pchem.nano_qdens_rate = nano_qdens_rate plasma_sys = pchem.get_system() sol = solve_ivp(plasma_sys, [time, time+delta_t], pchem.past_plasmadensity,", "tfreq): \"\"\" Compute collision frequencies OML theory and Tunnel frequency \"\"\" kte =", "efreq[i][j]): tfreq[i][j] = 1e6*ifreq[i][j] class Charging: \"\"\" Compute nanoparticle charging rate \"\"\" def", "delta_t, grid_data, pchem, growth_data, charging, plasma_sys): \"\"\" Solve the plasma densities \"\"\" with_tunnel", "zcharge, radius): \"\"\" Tunnel frequency \"\"\" prefac1 = (-zcharge)*np.sqrt(2.*const.Boltzmann*self.psys.temperature/ME)*(0.5/radius) rtaff = self.rt_affinity(radius, zcharge)", "1000000.0 return rtaff def ptunnel(self, zcharge, radius): \"\"\" Tunnel frequency \"\"\" prefac1 =", "i, diam in enumerate(gdata.dpivots): for j, zcharge in enumerate(gdata.qpivots[gdata.qpivots < 0]): if (tfreq[i][j]", "the plasma densities \"\"\" with_tunnel = plasma_sys.with_tunnel nel = pchem.past_plasmadensity[0] nar = pchem.past_plasmadensity[1]", "<NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "self.tfrequency = TunnelFrequency(self.psys) self.rmesh, self.qmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots*QE, indexing='ij') self.rmesh, self.zmesh = np.meshgrid(self.gdata.dpivots*0.5e-9,", "np.isscalar(rtaff): if rtaff < 0: return 1000000.0 else: rtaff[rtaff < 0] = 1000000.0", "ANY KIND, either express or implied. 
# See the License for the specific", "sol = solve_ivp(plasma_sys, [time, time+delta_t], pchem.past_plasmadensity, method='BDF', dense_output=False, t_eval=[time, time+delta_t]) pchem.next_plasmadensity = np.nan_to_num(sol.y.T[-1])", "(2.0/3.0)*ion_energy ifreqfactor = 4.0 * PI * idensity * np.sqrt(kti/(2.0*PI*self.psys.armass)) efreq.fill(0) ifreq.fill(0) tfreq.fill(0)", "self.psys = plasmasystem self.gdata = grid_data self.tfrequency = TunnelFrequency(self.psys) self.rmesh, self.qmesh = np.meshgrid(self.gdata.dpivots*0.5e-9,", "electron tunnel frequency \"\"\" def __init__(self, plasmasystem): self.psys = plasmasystem self.eaffinity = 4.05*const.elementary_charge", "= with_tunnel*np.sum(npdensity*charging.tfreq) energy_gain = with_tunnel*np.sum(charging.coll.phid*npdensity*charging.tfreq) pchem.density_sourcedrain = np.array([electron_loss, ion_loss, 0.0, 0.0, tunnel_gain, energy_gain,", "<= 0] = (ifreqfactor * rmesh2[:, gdata.qpivots <= 0] * (1.0 - QE*phid[:,", "= nano_qdens_rate plasma_sys = pchem.get_system() sol = solve_ivp(plasma_sys, [time, time+delta_t], pchem.past_plasmadensity, method='BDF', dense_output=False,", "= np.zeros((self.nvols, self.nchrgs)) self.tfreq = np.zeros((self.nvols, self.nchrgs)) def compute_freqs(self, energy, edensity, idensity): \"\"\"", "def compute_freqs(self, energy, edensity, idensity): \"\"\" Compute frequencies \"\"\" self.coll.compute_collisionfreq(energy, edensity, idensity, self.efreq,", "= np.zeros((self.nvols, self.nchrgs)) def compute_freqs(self, energy, edensity, idensity): \"\"\" Compute frequencies \"\"\" self.coll.compute_collisionfreq(energy,", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "idensity, self.efreq, self.ifreq, self.tfreq) def compute_plasmacharging(time, delta_t, grid_data, pchem, growth_data, charging, plasma_sys): \"\"\"", "zcharge): \"\"\" Tunneling probability \"\"\" prefac1 = -2./const.hbar prefac2 = np.sqrt(2.*ME*particle_potenergy(rtaff, zcharge)) return", "tfreq.fill(0) gdata = self.gdata rmesh2 = self.rmesh2 phid = self.phid efreq[:, gdata.qpivots <", "= with_tunnel*np.sum(charging.coll.phid*npdensity*charging.tfreq) pchem.density_sourcedrain = np.array([electron_loss, ion_loss, 0.0, 0.0, tunnel_gain, energy_gain, energy_loss]) nano_qdens =", "TunnelFrequency(self.psys) self.rmesh, self.qmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots*QE, indexing='ij') self.rmesh, self.zmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots, indexing='ij')", "ifreq[:, gdata.qpivots <= 0] = (ifreqfactor * rmesh2[:, gdata.qpivots <= 0] * (1.0", "escape \"\"\" ainfinity = self.eaffinity ainf = ainfinity * INVKE/QE**2 rtaff = zcharge/(zcharge/radius", "-np.sqrt(radius*(rtaff-radius)))) class TunnelFrequency: \"\"\" Computes electron tunnel frequency \"\"\" def __init__(self, plasmasystem): self.psys", "\"\"\" return -(KE*zcharge*QE**2)/radius def tunnel(rtaff, radius, zcharge): \"\"\" Tunneling probability \"\"\" prefac1 =", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "numpy as np import scipy.constants as const from scipy.integrate import solve_ivp PI =", "= nano_qdens nano_qdens_rate = np.sum(growth_data.qrate2d*grid_data.qpivots) pchem.nano_qdens_rate = nano_qdens_rate plasma_sys = pchem.get_system() sol =", "np.exp(QE*phid[:, gdata.qpivots < 0]/kte)) efreq[:, gdata.qpivots >= 0] = (efreqfactor * rmesh2[:, gdata.qpivots", "self.rmesh, self.qmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots*QE, indexing='ij') self.rmesh, self.zmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots, indexing='ij') self.rmesh2", "OF ANY KIND, either express or implied. 
# See the License for the", "nar = pchem.past_plasmadensity[1] npdensity = growth_data.next_density ion_loss = np.sum(npdensity*charging.ifreq)/nar electron_loss = np.sum(npdensity*charging.efreq)/nel energy_loss", "# Copyright 2019 <NAME> # # Licensed under the Apache License, Version 2.0", "radius, zcharge): \"\"\" Computes rt_affinity to particle to escape \"\"\" ainfinity = self.eaffinity", "self.rt_affinity(radius, zcharge) return prefac1*tunnel(rtaff, radius, zcharge) class CollisionFrequency: \"\"\" Stores and computes collision", "the License. # # -*- coding: utf-8 -*- \"\"\" This module contains the", "def __call__(self, zcharge, radius): return self.ptunnel(zcharge, radius) def rt_affinity(self, radius, zcharge): \"\"\" Computes", "\"\"\" prefac1 = -2./const.hbar prefac2 = np.sqrt(2.*ME*particle_potenergy(rtaff, zcharge)) return np.exp(prefac1*prefac2*(rtaff*np.arccos(np.sqrt(radius/rtaff)) -np.sqrt(radius*(rtaff-radius)))) class TunnelFrequency:", "ptunnel(self, zcharge, radius): \"\"\" Tunnel frequency \"\"\" prefac1 = (-zcharge)*np.sqrt(2.*const.Boltzmann*self.psys.temperature/ME)*(0.5/radius) rtaff = self.rt_affinity(radius,", "energy_loss = np.sum(charging.coll.phid*npdensity*charging.efreq)/nel tunnel_gain = with_tunnel*np.sum(npdensity*charging.tfreq) energy_gain = with_tunnel*np.sum(charging.coll.phid*npdensity*charging.tfreq) pchem.density_sourcedrain = np.array([electron_loss, ion_loss,", "Tunnel frequency \"\"\" prefac1 = (-zcharge)*np.sqrt(2.*const.Boltzmann*self.psys.temperature/ME)*(0.5/radius) rtaff = self.rt_affinity(radius, zcharge) return prefac1*tunnel(rtaff, radius,", "efreqfactor = 4.0 * PI * edensity * np.sqrt(kte/(2.0*PI*ME)) ion_energy_from_temperature = (3.0/2.0) *", "= zcharge/(zcharge/radius + ainf - (5.0/(8.0*radius))) if np.isscalar(rtaff): if rtaff < 0: return", "(-zcharge)*np.sqrt(2.*const.Boltzmann*self.psys.temperature/ME)*(0.5/radius) rtaff = self.rt_affinity(radius, zcharge) return prefac1*tunnel(rtaff, radius, zcharge) 
class CollisionFrequency: \"\"\" Stores", "indexing='ij') self.rmesh2 = self.rmesh**2 self.phid = coulomb_floatpotential(self.qmesh, self.rmesh) self.ion_velocity = 0.0 def compute_collisionfreq(self,", "return self.ptunnel(zcharge, radius) def rt_affinity(self, radius, zcharge): \"\"\" Computes rt_affinity to particle to", "1e6*ifreq[i][j] class Charging: \"\"\" Compute nanoparticle charging rate \"\"\" def __init__(self, collision_frequency, grid_data):", "else: rtaff[rtaff < 0] = 1000000.0 return rtaff def ptunnel(self, zcharge, radius): \"\"\"", "< 0] = 1000000.0 return rtaff def ptunnel(self, zcharge, radius): \"\"\" Tunnel frequency", "= \"Copyright 2019\" __credits__ = [\"<NAME>\"] __license__ = \"Apache 2.0\" __version__ = \"0.0.1\"", "\"Apache 2.0\" __version__ = \"0.0.1\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__ =", "\"0.0.1\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Beta\" import numpy as", "scipy.constants as const from scipy.integrate import solve_ivp PI = const.pi KE = 1.0/(4.0*PI*const.epsilon_0)", "ion_loss = np.sum(npdensity*charging.ifreq)/nar electron_loss = np.sum(npdensity*charging.efreq)/nel energy_loss = np.sum(charging.coll.phid*npdensity*charging.efreq)/nel tunnel_gain = with_tunnel*np.sum(npdensity*charging.tfreq) energy_gain", "= growth_data.next_density ion_loss = np.sum(npdensity*charging.ifreq)/nar electron_loss = np.sum(npdensity*charging.efreq)/nel energy_loss = np.sum(charging.coll.phid*npdensity*charging.efreq)/nel tunnel_gain =", "prefac1 = (-zcharge)*np.sqrt(2.*const.Boltzmann*self.psys.temperature/ME)*(0.5/radius) rtaff = self.rt_affinity(radius, zcharge) return prefac1*tunnel(rtaff, radius, zcharge) class CollisionFrequency:", "gdata.qpivots >= 0]/kte)) ifreq[:, gdata.qpivots <= 0] = (ifreqfactor * rmesh2[:, gdata.qpivots <=", "__maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Beta\" import numpy as np", "software # distributed under the License is distributed on 
an \"AS IS\" BASIS,", "\"\"\" def __init__(self, plasmasystem, grid_data): self.psys = plasmasystem self.gdata = grid_data self.tfrequency =", "def __init__(self, plasmasystem, grid_data): self.psys = plasmasystem self.gdata = grid_data self.tfrequency = TunnelFrequency(self.psys)", "idensity): \"\"\" Compute frequencies \"\"\" self.coll.compute_collisionfreq(energy, edensity, idensity, self.efreq, self.ifreq, self.tfreq) def compute_plasmacharging(time,", "enumerate(gdata.qpivots[gdata.qpivots < 0]): if (tfreq[i][j] > 1e6*ifreq[i][j]) and (ifreq[i][j] > efreq[i][j]): tfreq[i][j] =", "and computes collision frequencies \"\"\" def __init__(self, plasmasystem, grid_data): self.psys = plasmasystem self.gdata", "tunnel frequency \"\"\" def __init__(self, plasmasystem): self.psys = plasmasystem self.eaffinity = 4.05*const.elementary_charge def", "with_tunnel*np.sum(charging.coll.phid*npdensity*charging.tfreq) pchem.density_sourcedrain = np.array([electron_loss, ion_loss, 0.0, 0.0, tunnel_gain, energy_gain, energy_loss]) nano_qdens = np.sum(npdensity*grid_data.qpivots)", "energy \"\"\" return -(KE*zcharge*QE**2)/radius def tunnel(rtaff, radius, zcharge): \"\"\" Tunneling probability \"\"\" prefac1", "np.array([electron_loss, ion_loss, 0.0, 0.0, tunnel_gain, energy_gain, energy_loss]) nano_qdens = np.sum(npdensity*grid_data.qpivots) pchem.nano_qdens = nano_qdens", "(efreqfactor * rmesh2[:, gdata.qpivots < 0] * np.exp(QE*phid[:, gdata.qpivots < 0]/kte)) efreq[:, gdata.qpivots", "KB * self.psys.ion_temperature ion_energy = (ion_energy_from_temperature + 0.5*self.psys.armass*self.ion_velocity*self.ion_velocity) kti = (2.0/3.0)*ion_energy ifreqfactor =", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "__email__ = \"<EMAIL>\" __status__ = \"Beta\" import numpy as np import scipy.constants as", "collision frequencies OML theory and Tunnel frequency \"\"\" kte = (2.0/3.0)*energy*QE efreqfactor =", "solve_ivp PI = const.pi KE 
= 1.0/(4.0*PI*const.epsilon_0) INVKE = 1.0/KE KB = const.Boltzmann", "radius) def rt_affinity(self, radius, zcharge): \"\"\" Computes rt_affinity to particle to escape \"\"\"", "probability \"\"\" prefac1 = -2./const.hbar prefac2 = np.sqrt(2.*ME*particle_potenergy(rtaff, zcharge)) return np.exp(prefac1*prefac2*(rtaff*np.arccos(np.sqrt(radius/rtaff)) -np.sqrt(radius*(rtaff-radius)))) class", "QE = const.elementary_charge ME = const.electron_mass def coulomb_floatpotential(qcharge, radius): \"\"\" Floating potential \"\"\"", "grid_data self.nvols = self.grid_data.nvols self.nchrgs = self.grid_data.nchrgs self.efreq = np.zeros((self.nvols, self.nchrgs)) self.ifreq =", "\"\"\" prefac1 = (-zcharge)*np.sqrt(2.*const.Boltzmann*self.psys.temperature/ME)*(0.5/radius) rtaff = self.rt_affinity(radius, zcharge) return prefac1*tunnel(rtaff, radius, zcharge) class", "np.zeros((self.nvols, self.nchrgs)) def compute_freqs(self, energy, edensity, idensity): \"\"\" Compute frequencies \"\"\" self.coll.compute_collisionfreq(energy, edensity,", "= 4.0 * PI * idensity * np.sqrt(kti/(2.0*PI*self.psys.armass)) efreq.fill(0) ifreq.fill(0) tfreq.fill(0) gdata =", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "INVKE = 1.0/KE KB = const.Boltzmann QE = const.elementary_charge ME = const.electron_mass def", "class CollisionFrequency: \"\"\" Stores and computes collision frequencies \"\"\" def __init__(self, plasmasystem, grid_data):", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "self.grid_data.nchrgs self.efreq = np.zeros((self.nvols, self.nchrgs)) self.ifreq = np.zeros((self.nvols, self.nchrgs)) self.tfreq = np.zeros((self.nvols, self.nchrgs))", "pchem.past_plasmadensity[0] nar = pchem.past_plasmadensity[1] npdensity = growth_data.next_density ion_loss = np.sum(npdensity*charging.ifreq)/nar electron_loss = np.sum(npdensity*charging.efreq)/nel", "zcharge/(zcharge/radius + ainf - (5.0/(8.0*radius))) if 
np.isscalar(rtaff): if rtaff < 0: return 1000000.0", "return np.exp(prefac1*prefac2*(rtaff*np.arccos(np.sqrt(radius/rtaff)) -np.sqrt(radius*(rtaff-radius)))) class TunnelFrequency: \"\"\" Computes electron tunnel frequency \"\"\" def __init__(self,", "\"\"\" Nanoparticle potential energy \"\"\" return -(KE*zcharge*QE**2)/radius def tunnel(rtaff, radius, zcharge): \"\"\" Tunneling", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "0.5*self.psys.armass*self.ion_velocity*self.ion_velocity) kti = (2.0/3.0)*ion_energy ifreqfactor = 4.0 * PI * idensity * np.sqrt(kti/(2.0*PI*self.psys.armass))", "np.sum(charging.coll.phid*npdensity*charging.efreq)/nel tunnel_gain = with_tunnel*np.sum(npdensity*charging.tfreq) energy_gain = with_tunnel*np.sum(charging.coll.phid*npdensity*charging.tfreq) pchem.density_sourcedrain = np.array([electron_loss, ion_loss, 0.0, 0.0,", "limitations under the License. # # -*- coding: utf-8 -*- \"\"\" This module", "-2./const.hbar prefac2 = np.sqrt(2.*ME*particle_potenergy(rtaff, zcharge)) return np.exp(prefac1*prefac2*(rtaff*np.arccos(np.sqrt(radius/rtaff)) -np.sqrt(radius*(rtaff-radius)))) class TunnelFrequency: \"\"\" Computes electron", "self.tfreq) def compute_plasmacharging(time, delta_t, grid_data, pchem, growth_data, charging, plasma_sys): \"\"\" Solve the plasma", "scipy.integrate import solve_ivp PI = const.pi KE = 1.0/(4.0*PI*const.epsilon_0) INVKE = 1.0/KE KB", "\"\"\" Computes rt_affinity to particle to escape \"\"\" ainfinity = self.eaffinity ainf =", "required by applicable law or agreed to in writing, software # distributed under", "0] * np.exp(QE*phid[:, gdata.qpivots < 0]/kte)) efreq[:, gdata.qpivots >= 0] = (efreqfactor *", "method='BDF', dense_output=False, t_eval=[time, time+delta_t]) pchem.next_plasmadensity = np.nan_to_num(sol.y.T[-1]) # quasineutrality pchem.next_plasmadensity[3] = (pchem.next_plasmadensity[0]-pchem.nano_qdens -pchem.next_plasmadensity[1])", "applicable law or agreed to in 
writing, software # distributed under the License", "(1.0 - QE*phid[:, gdata.qpivots <= 0]/kti)) ifreq[:, gdata.qpivots > 0] = (ifreqfactor *", "self.gdata rmesh2 = self.rmesh2 phid = self.phid efreq[:, gdata.qpivots < 0] = (efreqfactor", "OML theory and Tunnel frequency \"\"\" kte = (2.0/3.0)*energy*QE efreqfactor = 4.0 *", "This module contains the classes functions and helpers to compute the plasma. \"\"\"", "to compute the plasma. \"\"\" __author__ = \"<NAME>\" __copyright__ = \"Copyright 2019\" __credits__", "with_tunnel = plasma_sys.with_tunnel nel = pchem.past_plasmadensity[0] nar = pchem.past_plasmadensity[1] npdensity = growth_data.next_density ion_loss", "License. # # -*- coding: utf-8 -*- \"\"\" This module contains the classes", "self.gdata = grid_data self.tfrequency = TunnelFrequency(self.psys) self.rmesh, self.qmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots*QE, indexing='ij') self.rmesh,", "helpers to compute the plasma. \"\"\" __author__ = \"<NAME>\" __copyright__ = \"Copyright 2019\"", "const.electron_mass def coulomb_floatpotential(qcharge, radius): \"\"\" Floating potential \"\"\" return KE*qcharge/radius def particle_potenergy(radius, zcharge):", "or agreed to in writing, software # distributed under the License is distributed", "__init__(self, collision_frequency, grid_data): \"\"\" \"\"\" self.coll = collision_frequency self.grid_data = grid_data self.nvols =", "* np.sqrt(kti/(2.0*PI*self.psys.armass)) efreq.fill(0) ifreq.fill(0) tfreq.fill(0) gdata = self.gdata rmesh2 = self.rmesh2 phid =", "__version__ = \"0.0.1\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Beta\" import", "0] = (efreqfactor * rmesh2[:, gdata.qpivots < 0] * np.exp(QE*phid[:, gdata.qpivots < 0]/kte))", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "\"\"\" Compute nanoparticle charging rate \"\"\" def __init__(self, collision_frequency, grid_data): \"\"\" \"\"\" self.coll", "= \"<NAME>\" __copyright__ = \"Copyright 2019\" __credits__ = [\"<NAME>\"] __license__ = \"Apache 2.0\"", "plasma_sys = pchem.get_system() sol = solve_ivp(plasma_sys, [time, time+delta_t], pchem.past_plasmadensity, method='BDF', dense_output=False, t_eval=[time, time+delta_t])", ">= 0]/kte)) ifreq[:, gdata.qpivots <= 0] = (ifreqfactor * rmesh2[:, gdata.qpivots <= 0]", "efreq, ifreq, tfreq): \"\"\" Compute collision frequencies OML theory and Tunnel frequency \"\"\"", "gdata.qpivots > 0] * np.exp(-QE*phid[:, gdata.qpivots > 0]/kti)) for i, diam in enumerate(gdata.dpivots):", "\"Copyright 2019\" __credits__ = [\"<NAME>\"] __license__ = \"Apache 2.0\" __version__ = \"0.0.1\" __maintainer__", "enumerate(gdata.qpivots[gdata.qpivots < 0]): tfreq[i][j] = self.tfrequency(zcharge, 0.5e-9*diam) for i, diam in enumerate(gdata.dpivots): for", "= 1e6*ifreq[i][j] class Charging: \"\"\" Compute nanoparticle charging rate \"\"\" def __init__(self, collision_frequency,", "= 1.0/(4.0*PI*const.epsilon_0) INVKE = 1.0/KE KB = const.Boltzmann QE = const.elementary_charge ME =", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "rtaff = self.rt_affinity(radius, zcharge) return prefac1*tunnel(rtaff, radius, zcharge) class CollisionFrequency: \"\"\" Stores and", "ainf = ainfinity * INVKE/QE**2 rtaff = zcharge/(zcharge/radius + ainf - (5.0/(8.0*radius))) if", "writing, software # distributed under the License is distributed on an \"AS IS\"", "zcharge in enumerate(gdata.qpivots[gdata.qpivots < 0]): if (tfreq[i][j] > 1e6*ifreq[i][j]) and (ifreq[i][j] > efreq[i][j]):", "radius, zcharge) class CollisionFrequency: \"\"\" Stores and computes collision frequencies \"\"\" def __init__(self,", "plasmasystem, grid_data): self.psys = plasmasystem self.gdata = grid_data self.tfrequency = TunnelFrequency(self.psys) 
self.rmesh, self.qmesh", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "utf-8 -*- \"\"\" This module contains the classes functions and helpers to compute", "License. # You may obtain a copy of the License at # #", "to particle to escape \"\"\" ainfinity = self.eaffinity ainf = ainfinity * INVKE/QE**2", "\"\"\" kte = (2.0/3.0)*energy*QE efreqfactor = 4.0 * PI * edensity * np.sqrt(kte/(2.0*PI*ME))", "idensity, efreq, ifreq, tfreq): \"\"\" Compute collision frequencies OML theory and Tunnel frequency", "= self.gdata rmesh2 = self.rmesh2 phid = self.phid efreq[:, gdata.qpivots < 0] =", "np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots*QE, indexing='ij') self.rmesh, self.zmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots, indexing='ij') self.rmesh2 = self.rmesh**2 self.phid", "and (ifreq[i][j] > efreq[i][j]): tfreq[i][j] = 1e6*ifreq[i][j] class Charging: \"\"\" Compute nanoparticle charging", "zcharge) class CollisionFrequency: \"\"\" Stores and computes collision frequencies \"\"\" def __init__(self, plasmasystem,", "frequencies \"\"\" self.coll.compute_collisionfreq(energy, edensity, idensity, self.efreq, self.ifreq, self.tfreq) def compute_plasmacharging(time, delta_t, grid_data, pchem,", "= const.pi KE = 1.0/(4.0*PI*const.epsilon_0) INVKE = 1.0/KE KB = const.Boltzmann QE =", "compliance with the License. 
# You may obtain a copy of the License", "= \"Beta\" import numpy as np import scipy.constants as const from scipy.integrate import", "-*- \"\"\" This module contains the classes functions and helpers to compute the", "ion_energy_from_temperature = (3.0/2.0) * KB * self.psys.ion_temperature ion_energy = (ion_energy_from_temperature + 0.5*self.psys.armass*self.ion_velocity*self.ion_velocity) kti", "* (1.0 - QE*phid[:, gdata.qpivots <= 0]/kti)) ifreq[:, gdata.qpivots > 0] = (ifreqfactor", "nanoparticle charging rate \"\"\" def __init__(self, collision_frequency, grid_data): \"\"\" \"\"\" self.coll = collision_frequency", "efreq[:, gdata.qpivots < 0] = (efreqfactor * rmesh2[:, gdata.qpivots < 0] * np.exp(QE*phid[:,", "CollisionFrequency: \"\"\" Stores and computes collision frequencies \"\"\" def __init__(self, plasmasystem, grid_data): self.psys", "zcharge, radius): return self.ptunnel(zcharge, radius) def rt_affinity(self, radius, zcharge): \"\"\" Computes rt_affinity to", "gdata.qpivots > 0]/kti)) for i, diam in enumerate(gdata.dpivots): for j, zcharge in enumerate(gdata.qpivots[gdata.qpivots", "\"\"\" self.coll.compute_collisionfreq(energy, edensity, idensity, self.efreq, self.ifreq, self.tfreq) def compute_plasmacharging(time, delta_t, grid_data, pchem, growth_data,", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "pchem.past_plasmadensity, method='BDF', dense_output=False, t_eval=[time, time+delta_t]) pchem.next_plasmadensity = np.nan_to_num(sol.y.T[-1]) # quasineutrality pchem.next_plasmadensity[3] = (pchem.next_plasmadensity[0]-pchem.nano_qdens", "self.rmesh**2 self.phid = coulomb_floatpotential(self.qmesh, self.rmesh) self.ion_velocity = 0.0 def compute_collisionfreq(self, energy, edensity, idensity,", "zcharge): \"\"\" Computes rt_affinity to particle to escape \"\"\" ainfinity = self.eaffinity ainf", "self.nchrgs)) self.ifreq = np.zeros((self.nvols, self.nchrgs)) self.tfreq = np.zeros((self.nvols, 
self.nchrgs)) def compute_freqs(self, energy, edensity,", "= const.Boltzmann QE = const.elementary_charge ME = const.electron_mass def coulomb_floatpotential(qcharge, radius): \"\"\" Floating", "< 0] * np.exp(QE*phid[:, gdata.qpivots < 0]/kte)) efreq[:, gdata.qpivots >= 0] = (efreqfactor", "\"Beta\" import numpy as np import scipy.constants as const from scipy.integrate import solve_ivp", "\"<NAME>\" __copyright__ = \"Copyright 2019\" __credits__ = [\"<NAME>\"] __license__ = \"Apache 2.0\" __version__", "computes collision frequencies \"\"\" def __init__(self, plasmasystem, grid_data): self.psys = plasmasystem self.gdata =", "4.0 * PI * edensity * np.sqrt(kte/(2.0*PI*ME)) ion_energy_from_temperature = (3.0/2.0) * KB *", "gdata.qpivots < 0]/kte)) efreq[:, gdata.qpivots >= 0] = (efreqfactor * rmesh2[:, gdata.qpivots >=", "not use this file except in compliance with the License. # You may", "__license__ = \"Apache 2.0\" __version__ = \"0.0.1\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\"", "self.phid efreq[:, gdata.qpivots < 0] = (efreqfactor * rmesh2[:, gdata.qpivots < 0] *", "= self.rt_affinity(radius, zcharge) return prefac1*tunnel(rtaff, radius, zcharge) class CollisionFrequency: \"\"\" Stores and computes", "-*- coding: utf-8 -*- \"\"\" This module contains the classes functions and helpers", "Copyright 2019 <NAME> # # Licensed under the Apache License, Version 2.0 (the", "License, Version 2.0 (the \"License\"); # you may not use this file except", "= 1.0/KE KB = const.Boltzmann QE = const.elementary_charge ME = const.electron_mass def coulomb_floatpotential(qcharge,", "= solve_ivp(plasma_sys, [time, time+delta_t], pchem.past_plasmadensity, method='BDF', dense_output=False, t_eval=[time, time+delta_t]) pchem.next_plasmadensity = np.nan_to_num(sol.y.T[-1]) #", "self.ion_velocity = 0.0 def compute_collisionfreq(self, energy, edensity, idensity, efreq, ifreq, tfreq): \"\"\" Compute", "= pchem.get_system() sol = solve_ivp(plasma_sys, [time, time+delta_t], 
pchem.past_plasmadensity, method='BDF', dense_output=False, t_eval=[time, time+delta_t]) pchem.next_plasmadensity", "rmesh2 = self.rmesh2 phid = self.phid efreq[:, gdata.qpivots < 0] = (efreqfactor *", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "const from scipy.integrate import solve_ivp PI = const.pi KE = 1.0/(4.0*PI*const.epsilon_0) INVKE =", "\"\"\" Stores and computes collision frequencies \"\"\" def __init__(self, plasmasystem, grid_data): self.psys =", "Compute nanoparticle charging rate \"\"\" def __init__(self, collision_frequency, grid_data): \"\"\" \"\"\" self.coll =", "and helpers to compute the plasma. \"\"\" __author__ = \"<NAME>\" __copyright__ = \"Copyright", "plasma_sys.with_tunnel nel = pchem.past_plasmadensity[0] nar = pchem.past_plasmadensity[1] npdensity = growth_data.next_density ion_loss = np.sum(npdensity*charging.ifreq)/nar", "0.0, tunnel_gain, energy_gain, energy_loss]) nano_qdens = np.sum(npdensity*grid_data.qpivots) pchem.nano_qdens = nano_qdens nano_qdens_rate = np.sum(growth_data.qrate2d*grid_data.qpivots)", "\"\"\" Tunnel frequency \"\"\" prefac1 = (-zcharge)*np.sqrt(2.*const.Boltzmann*self.psys.temperature/ME)*(0.5/radius) rtaff = self.rt_affinity(radius, zcharge) return prefac1*tunnel(rtaff,", "np.sum(npdensity*grid_data.qpivots) pchem.nano_qdens = nano_qdens nano_qdens_rate = np.sum(growth_data.qrate2d*grid_data.qpivots) pchem.nano_qdens_rate = nano_qdens_rate plasma_sys = pchem.get_system()", "= (ifreqfactor * rmesh2[:, gdata.qpivots <= 0] * (1.0 - QE*phid[:, gdata.qpivots <=", "energy_gain, energy_loss]) nano_qdens = np.sum(npdensity*grid_data.qpivots) pchem.nano_qdens = nano_qdens nano_qdens_rate = np.sum(growth_data.qrate2d*grid_data.qpivots) pchem.nano_qdens_rate =", "# you may not use this file except in compliance with the License.", "= np.sqrt(2.*ME*particle_potenergy(rtaff, zcharge)) return np.exp(prefac1*prefac2*(rtaff*np.arccos(np.sqrt(radius/rtaff)) -np.sqrt(radius*(rtaff-radius)))) 
class TunnelFrequency: \"\"\" Computes electron tunnel frequency", "time+delta_t], pchem.past_plasmadensity, method='BDF', dense_output=False, t_eval=[time, time+delta_t]) pchem.next_plasmadensity = np.nan_to_num(sol.y.T[-1]) # quasineutrality pchem.next_plasmadensity[3] =", "grid_data): \"\"\" \"\"\" self.coll = collision_frequency self.grid_data = grid_data self.nvols = self.grid_data.nvols self.nchrgs", "rt_affinity(self, radius, zcharge): \"\"\" Computes rt_affinity to particle to escape \"\"\" ainfinity =", "agreed to in writing, software # distributed under the License is distributed on", "= plasma_sys.with_tunnel nel = pchem.past_plasmadensity[0] nar = pchem.past_plasmadensity[1] npdensity = growth_data.next_density ion_loss =", "Tunneling probability \"\"\" prefac1 = -2./const.hbar prefac2 = np.sqrt(2.*ME*particle_potenergy(rtaff, zcharge)) return np.exp(prefac1*prefac2*(rtaff*np.arccos(np.sqrt(radius/rtaff)) -np.sqrt(radius*(rtaff-radius))))", "prefac2 = np.sqrt(2.*ME*particle_potenergy(rtaff, zcharge)) return np.exp(prefac1*prefac2*(rtaff*np.arccos(np.sqrt(radius/rtaff)) -np.sqrt(radius*(rtaff-radius)))) class TunnelFrequency: \"\"\" Computes electron tunnel", "grid_data self.tfrequency = TunnelFrequency(self.psys) self.rmesh, self.qmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots*QE, indexing='ij') self.rmesh, self.zmesh =", "kte = (2.0/3.0)*energy*QE efreqfactor = 4.0 * PI * edensity * np.sqrt(kte/(2.0*PI*ME)) ion_energy_from_temperature", "0]/kti)) ifreq[:, gdata.qpivots > 0] = (ifreqfactor * rmesh2[:, gdata.qpivots > 0] *", "(the \"License\"); # you may not use this file except in compliance with", "PI * edensity * np.sqrt(kte/(2.0*PI*ME)) ion_energy_from_temperature = (3.0/2.0) * KB * self.psys.ion_temperature ion_energy", "solve_ivp(plasma_sys, [time, time+delta_t], pchem.past_plasmadensity, method='BDF', dense_output=False, t_eval=[time, time+delta_t]) pchem.next_plasmadensity = np.nan_to_num(sol.y.T[-1]) # quasineutrality", 
"Charging: \"\"\" Compute nanoparticle charging rate \"\"\" def __init__(self, collision_frequency, grid_data): \"\"\" \"\"\"", "__copyright__ = \"Copyright 2019\" __credits__ = [\"<NAME>\"] __license__ = \"Apache 2.0\" __version__ =", "in enumerate(gdata.qpivots[gdata.qpivots < 0]): tfreq[i][j] = self.tfrequency(zcharge, 0.5e-9*diam) for i, diam in enumerate(gdata.dpivots):", "= self.phid efreq[:, gdata.qpivots < 0] = (efreqfactor * rmesh2[:, gdata.qpivots < 0]", "rmesh2[:, gdata.qpivots < 0] * np.exp(QE*phid[:, gdata.qpivots < 0]/kte)) efreq[:, gdata.qpivots >= 0]", "gdata.qpivots > 0] = (ifreqfactor * rmesh2[:, gdata.qpivots > 0] * np.exp(-QE*phid[:, gdata.qpivots", "0]/kti)) for i, diam in enumerate(gdata.dpivots): for j, zcharge in enumerate(gdata.qpivots[gdata.qpivots < 0]):", "# Unless required by applicable law or agreed to in writing, software #", "np.sum(npdensity*charging.ifreq)/nar electron_loss = np.sum(npdensity*charging.efreq)/nel energy_loss = np.sum(charging.coll.phid*npdensity*charging.efreq)/nel tunnel_gain = with_tunnel*np.sum(npdensity*charging.tfreq) energy_gain = with_tunnel*np.sum(charging.coll.phid*npdensity*charging.tfreq)", "by applicable law or agreed to in writing, software # distributed under the", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "Stores and computes collision frequencies \"\"\" def __init__(self, plasmasystem, grid_data): self.psys = plasmasystem", "idensity * np.sqrt(kti/(2.0*PI*self.psys.armass)) efreq.fill(0) ifreq.fill(0) tfreq.fill(0) gdata = self.gdata rmesh2 = self.rmesh2 phid", "Nanoparticle potential energy \"\"\" return -(KE*zcharge*QE**2)/radius def tunnel(rtaff, radius, zcharge): \"\"\" Tunneling probability", "1.0/(4.0*PI*const.epsilon_0) INVKE = 1.0/KE KB = const.Boltzmann QE = const.elementary_charge ME = const.electron_mass", "< 0] = (efreqfactor * rmesh2[:, gdata.qpivots < 0] * np.exp(QE*phid[:, gdata.qpivots <", "= np.sum(npdensity*charging.ifreq)/nar 
electron_loss = np.sum(npdensity*charging.efreq)/nel energy_loss = np.sum(charging.coll.phid*npdensity*charging.efreq)/nel tunnel_gain = with_tunnel*np.sum(npdensity*charging.tfreq) energy_gain =", "gdata.qpivots <= 0]/kti)) ifreq[:, gdata.qpivots > 0] = (ifreqfactor * rmesh2[:, gdata.qpivots >", "theory and Tunnel frequency \"\"\" kte = (2.0/3.0)*energy*QE efreqfactor = 4.0 * PI", "and Tunnel frequency \"\"\" kte = (2.0/3.0)*energy*QE efreqfactor = 4.0 * PI *", "growth_data.next_density ion_loss = np.sum(npdensity*charging.ifreq)/nar electron_loss = np.sum(npdensity*charging.efreq)/nel energy_loss = np.sum(charging.coll.phid*npdensity*charging.efreq)/nel tunnel_gain = with_tunnel*np.sum(npdensity*charging.tfreq)", "potential \"\"\" return KE*qcharge/radius def particle_potenergy(radius, zcharge): \"\"\" Nanoparticle potential energy \"\"\" return", "(3.0/2.0) * KB * self.psys.ion_temperature ion_energy = (ion_energy_from_temperature + 0.5*self.psys.armass*self.ion_velocity*self.ion_velocity) kti = (2.0/3.0)*ion_energy", "prefac1*tunnel(rtaff, radius, zcharge) class CollisionFrequency: \"\"\" Stores and computes collision frequencies \"\"\" def", "zcharge) return prefac1*tunnel(rtaff, radius, zcharge) class CollisionFrequency: \"\"\" Stores and computes collision frequencies", "file except in compliance with the License. 
# You may obtain a copy", "import scipy.constants as const from scipy.integrate import solve_ivp PI = const.pi KE =", "charging rate \"\"\" def __init__(self, collision_frequency, grid_data): \"\"\" \"\"\" self.coll = collision_frequency self.grid_data", "<= 0]/kti)) ifreq[:, gdata.qpivots > 0] = (ifreqfactor * rmesh2[:, gdata.qpivots > 0]", "rtaff < 0: return 1000000.0 else: rtaff[rtaff < 0] = 1000000.0 return rtaff", "tunnel_gain = with_tunnel*np.sum(npdensity*charging.tfreq) energy_gain = with_tunnel*np.sum(charging.coll.phid*npdensity*charging.tfreq) pchem.density_sourcedrain = np.array([electron_loss, ion_loss, 0.0, 0.0, tunnel_gain,", "import solve_ivp PI = const.pi KE = 1.0/(4.0*PI*const.epsilon_0) INVKE = 1.0/KE KB =", "License for the specific language governing permissions and # limitations under the License.", "in enumerate(gdata.dpivots): for j, zcharge in enumerate(gdata.qpivots[gdata.qpivots < 0]): tfreq[i][j] = self.tfrequency(zcharge, 0.5e-9*diam)", "* np.exp(QE*phid[:, gdata.qpivots < 0]/kte)) efreq[:, gdata.qpivots >= 0] = (efreqfactor * rmesh2[:,", "def coulomb_floatpotential(qcharge, radius): \"\"\" Floating potential \"\"\" return KE*qcharge/radius def particle_potenergy(radius, zcharge): \"\"\"", "np.sqrt(kti/(2.0*PI*self.psys.armass)) efreq.fill(0) ifreq.fill(0) tfreq.fill(0) gdata = self.gdata rmesh2 = self.rmesh2 phid = self.phid", "def compute_plasmacharging(time, delta_t, grid_data, pchem, growth_data, charging, plasma_sys): \"\"\" Solve the plasma densities", "= \"0.0.1\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Beta\" import numpy", "to in writing, software # distributed under the License is distributed on an", "const.pi KE = 1.0/(4.0*PI*const.epsilon_0) INVKE = 1.0/KE KB = const.Boltzmann QE = const.elementary_charge", "= 1000000.0 return rtaff def ptunnel(self, zcharge, radius): \"\"\" Tunnel frequency \"\"\" prefac1", "implied. 
# See the License for the specific language governing permissions and #", "class TunnelFrequency: \"\"\" Computes electron tunnel frequency \"\"\" def __init__(self, plasmasystem): self.psys =", "= (efreqfactor * rmesh2[:, gdata.qpivots >= 0] * (1.0 + QE*phid[:, gdata.qpivots >=", "\"License\"); # you may not use this file except in compliance with the", "= np.sum(growth_data.qrate2d*grid_data.qpivots) pchem.nano_qdens_rate = nano_qdens_rate plasma_sys = pchem.get_system() sol = solve_ivp(plasma_sys, [time, time+delta_t],", "module contains the classes functions and helpers to compute the plasma. \"\"\" __author__", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "to escape \"\"\" ainfinity = self.eaffinity ainf = ainfinity * INVKE/QE**2 rtaff =", "rmesh2[:, gdata.qpivots <= 0] * (1.0 - QE*phid[:, gdata.qpivots <= 0]/kti)) ifreq[:, gdata.qpivots", "self.nchrgs)) def compute_freqs(self, energy, edensity, idensity): \"\"\" Compute frequencies \"\"\" self.coll.compute_collisionfreq(energy, edensity, idensity,", "self.psys = plasmasystem self.eaffinity = 4.05*const.elementary_charge def __call__(self, zcharge, radius): return self.ptunnel(zcharge, radius)", "= np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots*QE, indexing='ij') self.rmesh, self.zmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots, indexing='ij') self.rmesh2 = self.rmesh**2", "energy_gain = with_tunnel*np.sum(charging.coll.phid*npdensity*charging.tfreq) pchem.density_sourcedrain = np.array([electron_loss, ion_loss, 0.0, 0.0, tunnel_gain, energy_gain, energy_loss]) nano_qdens", "= (2.0/3.0)*ion_energy ifreqfactor = 4.0 * PI * idensity * np.sqrt(kti/(2.0*PI*self.psys.armass)) efreq.fill(0) ifreq.fill(0)", "coulomb_floatpotential(self.qmesh, self.rmesh) self.ion_velocity = 0.0 def compute_collisionfreq(self, energy, edensity, idensity, efreq, ifreq, tfreq):", "ion_loss, 0.0, 0.0, tunnel_gain, energy_gain, energy_loss]) nano_qdens = 
np.sum(npdensity*grid_data.qpivots) pchem.nano_qdens = nano_qdens nano_qdens_rate", "(efreqfactor * rmesh2[:, gdata.qpivots >= 0] * (1.0 + QE*phid[:, gdata.qpivots >= 0]/kte))", "collision_frequency, grid_data): \"\"\" \"\"\" self.coll = collision_frequency self.grid_data = grid_data self.nvols = self.grid_data.nvols", "or implied. # See the License for the specific language governing permissions and", "- QE*phid[:, gdata.qpivots <= 0]/kti)) ifreq[:, gdata.qpivots > 0] = (ifreqfactor * rmesh2[:,", "gdata.qpivots <= 0] * (1.0 - QE*phid[:, gdata.qpivots <= 0]/kti)) ifreq[:, gdata.qpivots >", "= (ion_energy_from_temperature + 0.5*self.psys.armass*self.ion_velocity*self.ion_velocity) kti = (2.0/3.0)*ion_energy ifreqfactor = 4.0 * PI *", "+ QE*phid[:, gdata.qpivots >= 0]/kte)) ifreq[:, gdata.qpivots <= 0] = (ifreqfactor * rmesh2[:,", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "diam in enumerate(gdata.dpivots): for j, zcharge in enumerate(gdata.qpivots[gdata.qpivots < 0]): tfreq[i][j] = self.tfrequency(zcharge,", "const.Boltzmann QE = const.elementary_charge ME = const.electron_mass def coulomb_floatpotential(qcharge, radius): \"\"\" Floating potential", "self.nchrgs)) self.tfreq = np.zeros((self.nvols, self.nchrgs)) def compute_freqs(self, energy, edensity, idensity): \"\"\" Compute frequencies", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "__author__ = \"<NAME>\" __copyright__ = \"Copyright 2019\" __credits__ = [\"<NAME>\"] __license__ = \"Apache", "__init__(self, plasmasystem): self.psys = plasmasystem self.eaffinity = 4.05*const.elementary_charge def __call__(self, zcharge, radius): return", "if rtaff < 0: return 1000000.0 else: rtaff[rtaff < 0] = 1000000.0 return", "in writing, software # distributed under 
the License is distributed on an \"AS", "= self.eaffinity ainf = ainfinity * INVKE/QE**2 rtaff = zcharge/(zcharge/radius + ainf -", "pchem.nano_qdens = nano_qdens nano_qdens_rate = np.sum(growth_data.qrate2d*grid_data.qpivots) pchem.nano_qdens_rate = nano_qdens_rate plasma_sys = pchem.get_system() sol", "-(KE*zcharge*QE**2)/radius def tunnel(rtaff, radius, zcharge): \"\"\" Tunneling probability \"\"\" prefac1 = -2./const.hbar prefac2", "= (2.0/3.0)*energy*QE efreqfactor = 4.0 * PI * edensity * np.sqrt(kte/(2.0*PI*ME)) ion_energy_from_temperature =", "rmesh2[:, gdata.qpivots > 0] * np.exp(-QE*phid[:, gdata.qpivots > 0]/kti)) for i, diam in", "* rmesh2[:, gdata.qpivots < 0] * np.exp(QE*phid[:, gdata.qpivots < 0]/kte)) efreq[:, gdata.qpivots >=", "* idensity * np.sqrt(kti/(2.0*PI*self.psys.armass)) efreq.fill(0) ifreq.fill(0) tfreq.fill(0) gdata = self.gdata rmesh2 = self.rmesh2", "enumerate(gdata.dpivots): for j, zcharge in enumerate(gdata.qpivots[gdata.qpivots < 0]): tfreq[i][j] = self.tfrequency(zcharge, 0.5e-9*diam) for", "in enumerate(gdata.dpivots): for j, zcharge in enumerate(gdata.qpivots[gdata.qpivots < 0]): if (tfreq[i][j] > 1e6*ifreq[i][j])", "def particle_potenergy(radius, zcharge): \"\"\" Nanoparticle potential energy \"\"\" return -(KE*zcharge*QE**2)/radius def tunnel(rtaff, radius,", "__call__(self, zcharge, radius): return self.ptunnel(zcharge, radius) def rt_affinity(self, radius, zcharge): \"\"\" Computes rt_affinity", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "def rt_affinity(self, radius, zcharge): \"\"\" Computes rt_affinity to particle to escape \"\"\" ainfinity", "rtaff def ptunnel(self, zcharge, radius): \"\"\" Tunnel frequency \"\"\" prefac1 = (-zcharge)*np.sqrt(2.*const.Boltzmann*self.psys.temperature/ME)*(0.5/radius) rtaff", "TunnelFrequency: \"\"\" Computes electron tunnel frequency \"\"\" def __init__(self, 
plasmasystem): self.psys = plasmasystem", "for j, zcharge in enumerate(gdata.qpivots[gdata.qpivots < 0]): if (tfreq[i][j] > 1e6*ifreq[i][j]) and (ifreq[i][j]", "# limitations under the License. # # -*- coding: utf-8 -*- \"\"\" This", "ifreq[:, gdata.qpivots > 0] = (ifreqfactor * rmesh2[:, gdata.qpivots > 0] * np.exp(-QE*phid[:,", "\"\"\" Solve the plasma densities \"\"\" with_tunnel = plasma_sys.with_tunnel nel = pchem.past_plasmadensity[0] nar", "(1.0 + QE*phid[:, gdata.qpivots >= 0]/kte)) ifreq[:, gdata.qpivots <= 0] = (ifreqfactor *", "= 4.0 * PI * edensity * np.sqrt(kte/(2.0*PI*ME)) ion_energy_from_temperature = (3.0/2.0) * KB", "# -*- coding: utf-8 -*- \"\"\" This module contains the classes functions and", "* edensity * np.sqrt(kte/(2.0*PI*ME)) ion_energy_from_temperature = (3.0/2.0) * KB * self.psys.ion_temperature ion_energy =", "\"<EMAIL>\" __status__ = \"Beta\" import numpy as np import scipy.constants as const from", "specific language governing permissions and # limitations under the License. 
# # -*-", "KB = const.Boltzmann QE = const.elementary_charge ME = const.electron_mass def coulomb_floatpotential(qcharge, radius): \"\"\"", "plasma_sys): \"\"\" Solve the plasma densities \"\"\" with_tunnel = plasma_sys.with_tunnel nel = pchem.past_plasmadensity[0]", "coulomb_floatpotential(qcharge, radius): \"\"\" Floating potential \"\"\" return KE*qcharge/radius def particle_potenergy(radius, zcharge): \"\"\" Nanoparticle", "ME = const.electron_mass def coulomb_floatpotential(qcharge, radius): \"\"\" Floating potential \"\"\" return KE*qcharge/radius def", "diam in enumerate(gdata.dpivots): for j, zcharge in enumerate(gdata.qpivots[gdata.qpivots < 0]): if (tfreq[i][j] >", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "np.zeros((self.nvols, self.nchrgs)) self.ifreq = np.zeros((self.nvols, self.nchrgs)) self.tfreq = np.zeros((self.nvols, self.nchrgs)) def compute_freqs(self, energy,", "__init__(self, plasmasystem, grid_data): self.psys = plasmasystem self.gdata = grid_data self.tfrequency = TunnelFrequency(self.psys) self.rmesh,", "self.eaffinity = 4.05*const.elementary_charge def __call__(self, zcharge, radius): return self.ptunnel(zcharge, radius) def rt_affinity(self, radius,", "you may not use this file except in compliance with the License. 
#", "gdata.qpivots < 0] * np.exp(QE*phid[:, gdata.qpivots < 0]/kte)) efreq[:, gdata.qpivots >= 0] =", "frequency \"\"\" def __init__(self, plasmasystem): self.psys = plasmasystem self.eaffinity = 4.05*const.elementary_charge def __call__(self,", "self.nvols = self.grid_data.nvols self.nchrgs = self.grid_data.nchrgs self.efreq = np.zeros((self.nvols, self.nchrgs)) self.ifreq = np.zeros((self.nvols,", "* rmesh2[:, gdata.qpivots <= 0] * (1.0 - QE*phid[:, gdata.qpivots <= 0]/kti)) ifreq[:,", "2019 <NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\");", "particle_potenergy(radius, zcharge): \"\"\" Nanoparticle potential energy \"\"\" return -(KE*zcharge*QE**2)/radius def tunnel(rtaff, radius, zcharge):", "rmesh2[:, gdata.qpivots >= 0] * (1.0 + QE*phid[:, gdata.qpivots >= 0]/kte)) ifreq[:, gdata.qpivots", "self.nchrgs = self.grid_data.nchrgs self.efreq = np.zeros((self.nvols, self.nchrgs)) self.ifreq = np.zeros((self.nvols, self.nchrgs)) self.tfreq =", "= \"Apache 2.0\" __version__ = \"0.0.1\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__", "= (3.0/2.0) * KB * self.psys.ion_temperature ion_energy = (ion_energy_from_temperature + 0.5*self.psys.armass*self.ion_velocity*self.ion_velocity) kti =", "Solve the plasma densities \"\"\" with_tunnel = plasma_sys.with_tunnel nel = pchem.past_plasmadensity[0] nar =", "use this file except in compliance with the License. 
# You may obtain", "gdata.qpivots < 0] = (efreqfactor * rmesh2[:, gdata.qpivots < 0] * np.exp(QE*phid[:, gdata.qpivots", "= const.electron_mass def coulomb_floatpotential(qcharge, radius): \"\"\" Floating potential \"\"\" return KE*qcharge/radius def particle_potenergy(radius,", "0]/kte)) ifreq[:, gdata.qpivots <= 0] = (ifreqfactor * rmesh2[:, gdata.qpivots <= 0] *", "for i, diam in enumerate(gdata.dpivots): for j, zcharge in enumerate(gdata.qpivots[gdata.qpivots < 0]): if", "= self.grid_data.nvols self.nchrgs = self.grid_data.nchrgs self.efreq = np.zeros((self.nvols, self.nchrgs)) self.ifreq = np.zeros((self.nvols, self.nchrgs))", "self.rmesh2 = self.rmesh**2 self.phid = coulomb_floatpotential(self.qmesh, self.rmesh) self.ion_velocity = 0.0 def compute_collisionfreq(self, energy,", "0.0 def compute_collisionfreq(self, energy, edensity, idensity, efreq, ifreq, tfreq): \"\"\" Compute collision frequencies", "np.exp(prefac1*prefac2*(rtaff*np.arccos(np.sqrt(radius/rtaff)) -np.sqrt(radius*(rtaff-radius)))) class TunnelFrequency: \"\"\" Computes electron tunnel frequency \"\"\" def __init__(self, plasmasystem):", "self.ifreq, self.tfreq) def compute_plasmacharging(time, delta_t, grid_data, pchem, growth_data, charging, plasma_sys): \"\"\" Solve the", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "j, zcharge in enumerate(gdata.qpivots[gdata.qpivots < 0]): tfreq[i][j] = self.tfrequency(zcharge, 0.5e-9*diam) for i, diam", "(tfreq[i][j] > 1e6*ifreq[i][j]) and (ifreq[i][j] > efreq[i][j]): tfreq[i][j] = 1e6*ifreq[i][j] class Charging: \"\"\"", "np.exp(-QE*phid[:, gdata.qpivots > 0]/kti)) for i, diam in enumerate(gdata.dpivots): for j, zcharge in", "(2.0/3.0)*energy*QE efreqfactor = 4.0 * PI * edensity * np.sqrt(kte/(2.0*PI*ME)) ion_energy_from_temperature = (3.0/2.0)", "2.0\" __version__ = \"0.0.1\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Beta\"", "> 0] * np.exp(-QE*phid[:, gdata.qpivots > 0]/kti)) for i, diam in 
enumerate(gdata.dpivots): for", "\"\"\" This module contains the classes functions and helpers to compute the plasma.", "zcharge): \"\"\" Nanoparticle potential energy \"\"\" return -(KE*zcharge*QE**2)/radius def tunnel(rtaff, radius, zcharge): \"\"\"", "with_tunnel*np.sum(npdensity*charging.tfreq) energy_gain = with_tunnel*np.sum(charging.coll.phid*npdensity*charging.tfreq) pchem.density_sourcedrain = np.array([electron_loss, ion_loss, 0.0, 0.0, tunnel_gain, energy_gain, energy_loss])", "plasmasystem): self.psys = plasmasystem self.eaffinity = 4.05*const.elementary_charge def __call__(self, zcharge, radius): return self.ptunnel(zcharge,", "radius): \"\"\" Tunnel frequency \"\"\" prefac1 = (-zcharge)*np.sqrt(2.*const.Boltzmann*self.psys.temperature/ME)*(0.5/radius) rtaff = self.rt_affinity(radius, zcharge) return", "< 0]/kte)) efreq[:, gdata.qpivots >= 0] = (efreqfactor * rmesh2[:, gdata.qpivots >= 0]", "pchem.nano_qdens_rate = nano_qdens_rate plasma_sys = pchem.get_system() sol = solve_ivp(plasma_sys, [time, time+delta_t], pchem.past_plasmadensity, method='BDF',", "2.0 (the \"License\"); # you may not use this file except in compliance", "nano_qdens_rate = np.sum(growth_data.qrate2d*grid_data.qpivots) pchem.nano_qdens_rate = nano_qdens_rate plasma_sys = pchem.get_system() sol = solve_ivp(plasma_sys, [time,", "= np.sum(charging.coll.phid*npdensity*charging.efreq)/nel tunnel_gain = with_tunnel*np.sum(npdensity*charging.tfreq) energy_gain = with_tunnel*np.sum(charging.coll.phid*npdensity*charging.tfreq) pchem.density_sourcedrain = np.array([electron_loss, ion_loss, 0.0,", "self.qmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots*QE, indexing='ij') self.rmesh, self.zmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots, indexing='ij') self.rmesh2 =", "the classes functions and helpers to compute the plasma. 
\"\"\" __author__ = \"<NAME>\"", "indexing='ij') self.rmesh, self.zmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots, indexing='ij') self.rmesh2 = self.rmesh**2 self.phid = coulomb_floatpotential(self.qmesh,", "0] * (1.0 - QE*phid[:, gdata.qpivots <= 0]/kti)) ifreq[:, gdata.qpivots > 0] =", "for the specific language governing permissions and # limitations under the License. #", "= self.grid_data.nchrgs self.efreq = np.zeros((self.nvols, self.nchrgs)) self.ifreq = np.zeros((self.nvols, self.nchrgs)) self.tfreq = np.zeros((self.nvols,", "self.coll = collision_frequency self.grid_data = grid_data self.nvols = self.grid_data.nvols self.nchrgs = self.grid_data.nchrgs self.efreq", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "__credits__ = [\"<NAME>\"] __license__ = \"Apache 2.0\" __version__ = \"0.0.1\" __maintainer__ = \"<NAME>\"", "return KE*qcharge/radius def particle_potenergy(radius, zcharge): \"\"\" Nanoparticle potential energy \"\"\" return -(KE*zcharge*QE**2)/radius def", "# # Unless required by applicable law or agreed to in writing, software", "ainfinity = self.eaffinity ainf = ainfinity * INVKE/QE**2 rtaff = zcharge/(zcharge/radius + ainf", "express or implied. # See the License for the specific language governing permissions", "self.ptunnel(zcharge, radius) def rt_affinity(self, radius, zcharge): \"\"\" Computes rt_affinity to particle to escape", "< 0]): if (tfreq[i][j] > 1e6*ifreq[i][j]) and (ifreq[i][j] > efreq[i][j]): tfreq[i][j] = 1e6*ifreq[i][j]", "> 0]/kti)) for i, diam in enumerate(gdata.dpivots): for j, zcharge in enumerate(gdata.qpivots[gdata.qpivots <", "self.gdata.qpivots, indexing='ij') self.rmesh2 = self.rmesh**2 self.phid = coulomb_floatpotential(self.qmesh, self.rmesh) self.ion_velocity = 0.0 def", "plasma. 
\"\"\" __author__ = \"<NAME>\" __copyright__ = \"Copyright 2019\" __credits__ = [\"<NAME>\"] __license__", "efreq[:, gdata.qpivots >= 0] = (efreqfactor * rmesh2[:, gdata.qpivots >= 0] * (1.0", "= ainfinity * INVKE/QE**2 rtaff = zcharge/(zcharge/radius + ainf - (5.0/(8.0*radius))) if np.isscalar(rtaff):", "tfreq[i][j] = self.tfrequency(zcharge, 0.5e-9*diam) for i, diam in enumerate(gdata.dpivots): for j, zcharge in", "energy_loss]) nano_qdens = np.sum(npdensity*grid_data.qpivots) pchem.nano_qdens = nano_qdens nano_qdens_rate = np.sum(growth_data.qrate2d*grid_data.qpivots) pchem.nano_qdens_rate = nano_qdens_rate", "plasmasystem self.gdata = grid_data self.tfrequency = TunnelFrequency(self.psys) self.rmesh, self.qmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots*QE, indexing='ij')", "= (efreqfactor * rmesh2[:, gdata.qpivots < 0] * np.exp(QE*phid[:, gdata.qpivots < 0]/kte)) efreq[:,", "= pchem.past_plasmadensity[0] nar = pchem.past_plasmadensity[1] npdensity = growth_data.next_density ion_loss = np.sum(npdensity*charging.ifreq)/nar electron_loss =", "either express or implied. 
# See the License for the specific language governing", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "0] = (efreqfactor * rmesh2[:, gdata.qpivots >= 0] * (1.0 + QE*phid[:, gdata.qpivots", "Computes electron tunnel frequency \"\"\" def __init__(self, plasmasystem): self.psys = plasmasystem self.eaffinity =", "nano_qdens_rate plasma_sys = pchem.get_system() sol = solve_ivp(plasma_sys, [time, time+delta_t], pchem.past_plasmadensity, method='BDF', dense_output=False, t_eval=[time,", "frequencies \"\"\" def __init__(self, plasmasystem, grid_data): self.psys = plasmasystem self.gdata = grid_data self.tfrequency", "ifreq.fill(0) tfreq.fill(0) gdata = self.gdata rmesh2 = self.rmesh2 phid = self.phid efreq[:, gdata.qpivots", ">= 0] = (efreqfactor * rmesh2[:, gdata.qpivots >= 0] * (1.0 + QE*phid[:,", "self.rmesh) self.ion_velocity = 0.0 def compute_collisionfreq(self, energy, edensity, idensity, efreq, ifreq, tfreq): \"\"\"", "> efreq[i][j]): tfreq[i][j] = 1e6*ifreq[i][j] class Charging: \"\"\" Compute nanoparticle charging rate \"\"\"", "plasma densities \"\"\" with_tunnel = plasma_sys.with_tunnel nel = pchem.past_plasmadensity[0] nar = pchem.past_plasmadensity[1] npdensity", "densities \"\"\" with_tunnel = plasma_sys.with_tunnel nel = pchem.past_plasmadensity[0] nar = pchem.past_plasmadensity[1] npdensity =", "np.sqrt(2.*ME*particle_potenergy(rtaff, zcharge)) return np.exp(prefac1*prefac2*(rtaff*np.arccos(np.sqrt(radius/rtaff)) -np.sqrt(radius*(rtaff-radius)))) class TunnelFrequency: \"\"\" Computes electron tunnel frequency \"\"\"", "< 0]): tfreq[i][j] = self.tfrequency(zcharge, 0.5e-9*diam) for i, diam in enumerate(gdata.dpivots): for j,", "the License. 
# You may obtain a copy of the License at #", "= coulomb_floatpotential(self.qmesh, self.rmesh) self.ion_velocity = 0.0 def compute_collisionfreq(self, energy, edensity, idensity, efreq, ifreq,", "grid_data): self.psys = plasmasystem self.gdata = grid_data self.tfrequency = TunnelFrequency(self.psys) self.rmesh, self.qmesh =", "\"\"\" self.coll = collision_frequency self.grid_data = grid_data self.nvols = self.grid_data.nvols self.nchrgs = self.grid_data.nchrgs", "* self.psys.ion_temperature ion_energy = (ion_energy_from_temperature + 0.5*self.psys.armass*self.ion_velocity*self.ion_velocity) kti = (2.0/3.0)*ion_energy ifreqfactor = 4.0", "Floating potential \"\"\" return KE*qcharge/radius def particle_potenergy(radius, zcharge): \"\"\" Nanoparticle potential energy \"\"\"", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "import numpy as np import scipy.constants as const from scipy.integrate import solve_ivp PI", "tunnel_gain, energy_gain, energy_loss]) nano_qdens = np.sum(npdensity*grid_data.qpivots) pchem.nano_qdens = nano_qdens nano_qdens_rate = np.sum(growth_data.qrate2d*grid_data.qpivots) pchem.nano_qdens_rate", "PI * idensity * np.sqrt(kti/(2.0*PI*self.psys.armass)) efreq.fill(0) ifreq.fill(0) tfreq.fill(0) gdata = self.gdata rmesh2 =", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "np.sqrt(kte/(2.0*PI*ME)) ion_energy_from_temperature = (3.0/2.0) * KB * self.psys.ion_temperature ion_energy = (ion_energy_from_temperature + 0.5*self.psys.armass*self.ion_velocity*self.ion_velocity)", "governing permissions and # limitations under the License. 
# # -*- coding: utf-8", "particle to escape \"\"\" ainfinity = self.eaffinity ainf = ainfinity * INVKE/QE**2 rtaff", "electron_loss = np.sum(npdensity*charging.efreq)/nel energy_loss = np.sum(charging.coll.phid*npdensity*charging.efreq)/nel tunnel_gain = with_tunnel*np.sum(npdensity*charging.tfreq) energy_gain = with_tunnel*np.sum(charging.coll.phid*npdensity*charging.tfreq) pchem.density_sourcedrain", "permissions and # limitations under the License. # # -*- coding: utf-8 -*-", "contains the classes functions and helpers to compute the plasma. \"\"\" __author__ =", "edensity, idensity, efreq, ifreq, tfreq): \"\"\" Compute collision frequencies OML theory and Tunnel", "self.rmesh2 phid = self.phid efreq[:, gdata.qpivots < 0] = (efreqfactor * rmesh2[:, gdata.qpivots", "= 4.05*const.elementary_charge def __call__(self, zcharge, radius): return self.ptunnel(zcharge, radius) def rt_affinity(self, radius, zcharge):", "PI = const.pi KE = 1.0/(4.0*PI*const.epsilon_0) INVKE = 1.0/KE KB = const.Boltzmann QE", "= grid_data self.tfrequency = TunnelFrequency(self.psys) self.rmesh, self.qmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots*QE, indexing='ij') self.rmesh, self.zmesh", "np.zeros((self.nvols, self.nchrgs)) self.tfreq = np.zeros((self.nvols, self.nchrgs)) def compute_freqs(self, energy, edensity, idensity): \"\"\" Compute", "np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots, indexing='ij') self.rmesh2 = self.rmesh**2 self.phid = coulomb_floatpotential(self.qmesh, self.rmesh) self.ion_velocity = 0.0", "ainf - (5.0/(8.0*radius))) if np.isscalar(rtaff): if rtaff < 0: return 1000000.0 else: rtaff[rtaff", "growth_data, charging, plasma_sys): \"\"\" Solve the plasma densities \"\"\" with_tunnel = plasma_sys.with_tunnel nel", "gdata.qpivots >= 0] * (1.0 + QE*phid[:, gdata.qpivots >= 0]/kte)) ifreq[:, gdata.qpivots <=", "< 0: return 1000000.0 else: rtaff[rtaff < 0] = 1000000.0 return rtaff def", "compute_freqs(self, energy, edensity, idensity): \"\"\" 
Compute frequencies \"\"\" self.coll.compute_collisionfreq(energy, edensity, idensity, self.efreq, self.ifreq,", "self.phid = coulomb_floatpotential(self.qmesh, self.rmesh) self.ion_velocity = 0.0 def compute_collisionfreq(self, energy, edensity, idensity, efreq,", "self.zmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots, indexing='ij') self.rmesh2 = self.rmesh**2 self.phid = coulomb_floatpotential(self.qmesh, self.rmesh) self.ion_velocity", "and # limitations under the License. # # -*- coding: utf-8 -*- \"\"\"", "def __init__(self, plasmasystem): self.psys = plasmasystem self.eaffinity = 4.05*const.elementary_charge def __call__(self, zcharge, radius):", "with the License. # You may obtain a copy of the License at", "= plasmasystem self.eaffinity = 4.05*const.elementary_charge def __call__(self, zcharge, radius): return self.ptunnel(zcharge, radius) def", "1000000.0 else: rtaff[rtaff < 0] = 1000000.0 return rtaff def ptunnel(self, zcharge, radius):", "self.grid_data.nvols self.nchrgs = self.grid_data.nchrgs self.efreq = np.zeros((self.nvols, self.nchrgs)) self.ifreq = np.zeros((self.nvols, self.nchrgs)) self.tfreq", "charging, plasma_sys): \"\"\" Solve the plasma densities \"\"\" with_tunnel = plasma_sys.with_tunnel nel =", "(5.0/(8.0*radius))) if np.isscalar(rtaff): if rtaff < 0: return 1000000.0 else: rtaff[rtaff < 0]", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "under the License. 
# # -*- coding: utf-8 -*- \"\"\" This module contains", "phid = self.phid efreq[:, gdata.qpivots < 0] = (efreqfactor * rmesh2[:, gdata.qpivots <", "* PI * idensity * np.sqrt(kti/(2.0*PI*self.psys.armass)) efreq.fill(0) ifreq.fill(0) tfreq.fill(0) gdata = self.gdata rmesh2", "pchem, growth_data, charging, plasma_sys): \"\"\" Solve the plasma densities \"\"\" with_tunnel = plasma_sys.with_tunnel", "= self.rmesh2 phid = self.phid efreq[:, gdata.qpivots < 0] = (efreqfactor * rmesh2[:,", "self.rmesh, self.zmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots, indexing='ij') self.rmesh2 = self.rmesh**2 self.phid = coulomb_floatpotential(self.qmesh, self.rmesh)", "0]): if (tfreq[i][j] > 1e6*ifreq[i][j]) and (ifreq[i][j] > efreq[i][j]): tfreq[i][j] = 1e6*ifreq[i][j] class", "frequencies OML theory and Tunnel frequency \"\"\" kte = (2.0/3.0)*energy*QE efreqfactor = 4.0", "\"\"\" ainfinity = self.eaffinity ainf = ainfinity * INVKE/QE**2 rtaff = zcharge/(zcharge/radius +", "rt_affinity to particle to escape \"\"\" ainfinity = self.eaffinity ainf = ainfinity *", "if (tfreq[i][j] > 1e6*ifreq[i][j]) and (ifreq[i][j] > efreq[i][j]): tfreq[i][j] = 1e6*ifreq[i][j] class Charging:", "__status__ = \"Beta\" import numpy as np import scipy.constants as const from scipy.integrate", "4.0 * PI * idensity * np.sqrt(kti/(2.0*PI*self.psys.armass)) efreq.fill(0) ifreq.fill(0) tfreq.fill(0) gdata = self.gdata", "potential energy \"\"\" return -(KE*zcharge*QE**2)/radius def tunnel(rtaff, radius, zcharge): \"\"\" Tunneling probability \"\"\"", "kti = (2.0/3.0)*ion_energy ifreqfactor = 4.0 * PI * idensity * np.sqrt(kti/(2.0*PI*self.psys.armass)) efreq.fill(0)", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "the plasma. 
\"\"\" __author__ = \"<NAME>\" __copyright__ = \"Copyright 2019\" __credits__ = [\"<NAME>\"]", "* np.sqrt(kte/(2.0*PI*ME)) ion_energy_from_temperature = (3.0/2.0) * KB * self.psys.ion_temperature ion_energy = (ion_energy_from_temperature +", "for j, zcharge in enumerate(gdata.qpivots[gdata.qpivots < 0]): tfreq[i][j] = self.tfrequency(zcharge, 0.5e-9*diam) for i,", "\"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Beta\" import numpy as np import scipy.constants", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "ainfinity * INVKE/QE**2 rtaff = zcharge/(zcharge/radius + ainf - (5.0/(8.0*radius))) if np.isscalar(rtaff): if", "* KB * self.psys.ion_temperature ion_energy = (ion_energy_from_temperature + 0.5*self.psys.armass*self.ion_velocity*self.ion_velocity) kti = (2.0/3.0)*ion_energy ifreqfactor", "1.0/KE KB = const.Boltzmann QE = const.elementary_charge ME = const.electron_mass def coulomb_floatpotential(qcharge, radius):", "> 0] = (ifreqfactor * rmesh2[:, gdata.qpivots > 0] * np.exp(-QE*phid[:, gdata.qpivots >", "return rtaff def ptunnel(self, zcharge, radius): \"\"\" Tunnel frequency \"\"\" prefac1 = (-zcharge)*np.sqrt(2.*const.Boltzmann*self.psys.temperature/ME)*(0.5/radius)", "ifreqfactor = 4.0 * PI * idensity * np.sqrt(kti/(2.0*PI*self.psys.armass)) efreq.fill(0) ifreq.fill(0) tfreq.fill(0) gdata", "0] = (ifreqfactor * rmesh2[:, gdata.qpivots > 0] * np.exp(-QE*phid[:, gdata.qpivots > 0]/kti))", "QE*phid[:, gdata.qpivots >= 0]/kte)) ifreq[:, gdata.qpivots <= 0] = (ifreqfactor * rmesh2[:, gdata.qpivots", "\"\"\" Tunneling probability \"\"\" prefac1 = -2./const.hbar prefac2 = np.sqrt(2.*ME*particle_potenergy(rtaff, zcharge)) return np.exp(prefac1*prefac2*(rtaff*np.arccos(np.sqrt(radius/rtaff))", "tfreq[i][j] = 1e6*ifreq[i][j] class Charging: \"\"\" Compute nanoparticle charging rate \"\"\" def __init__(self,", "INVKE/QE**2 rtaff = zcharge/(zcharge/radius + ainf - (5.0/(8.0*radius))) if np.isscalar(rtaff): if rtaff <", "* INVKE/QE**2 
rtaff = zcharge/(zcharge/radius + ainf - (5.0/(8.0*radius))) if np.isscalar(rtaff): if rtaff", "classes functions and helpers to compute the plasma. \"\"\" __author__ = \"<NAME>\" __copyright__", "* rmesh2[:, gdata.qpivots > 0] * np.exp(-QE*phid[:, gdata.qpivots > 0]/kti)) for i, diam", "(ifreq[i][j] > efreq[i][j]): tfreq[i][j] = 1e6*ifreq[i][j] class Charging: \"\"\" Compute nanoparticle charging rate", "\"\"\" __author__ = \"<NAME>\" __copyright__ = \"Copyright 2019\" __credits__ = [\"<NAME>\"] __license__ =", "zcharge in enumerate(gdata.qpivots[gdata.qpivots < 0]): tfreq[i][j] = self.tfrequency(zcharge, 0.5e-9*diam) for i, diam in", "if np.isscalar(rtaff): if rtaff < 0: return 1000000.0 else: rtaff[rtaff < 0] =", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "= const.elementary_charge ME = const.electron_mass def coulomb_floatpotential(qcharge, radius): \"\"\" Floating potential \"\"\" return", "0]/kte)) efreq[:, gdata.qpivots >= 0] = (efreqfactor * rmesh2[:, gdata.qpivots >= 0] *", "edensity, idensity): \"\"\" Compute frequencies \"\"\" self.coll.compute_collisionfreq(energy, edensity, idensity, self.efreq, self.ifreq, self.tfreq) def", "= np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots, indexing='ij') self.rmesh2 = self.rmesh**2 self.phid = coulomb_floatpotential(self.qmesh, self.rmesh) self.ion_velocity =", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "return 1000000.0 else: rtaff[rtaff < 0] = 1000000.0 return rtaff def ptunnel(self, zcharge,", "[time, time+delta_t], pchem.past_plasmadensity, method='BDF', dense_output=False, t_eval=[time, time+delta_t]) pchem.next_plasmadensity = np.nan_to_num(sol.y.T[-1]) # quasineutrality pchem.next_plasmadensity[3]", "energy, edensity, idensity, efreq, ifreq, 
tfreq): \"\"\" Compute collision frequencies OML theory and", "Computes rt_affinity to particle to escape \"\"\" ainfinity = self.eaffinity ainf = ainfinity", "See the License for the specific language governing permissions and # limitations under", "edensity * np.sqrt(kte/(2.0*PI*ME)) ion_energy_from_temperature = (3.0/2.0) * KB * self.psys.ion_temperature ion_energy = (ion_energy_from_temperature", "nano_qdens nano_qdens_rate = np.sum(growth_data.qrate2d*grid_data.qpivots) pchem.nano_qdens_rate = nano_qdens_rate plasma_sys = pchem.get_system() sol = solve_ivp(plasma_sys,", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "QE*phid[:, gdata.qpivots <= 0]/kti)) ifreq[:, gdata.qpivots > 0] = (ifreqfactor * rmesh2[:, gdata.qpivots", "return prefac1*tunnel(rtaff, radius, zcharge) class CollisionFrequency: \"\"\" Stores and computes collision frequencies \"\"\"", "(ifreqfactor * rmesh2[:, gdata.qpivots <= 0] * (1.0 - QE*phid[:, gdata.qpivots <= 0]/kti))", "frequency \"\"\" kte = (2.0/3.0)*energy*QE efreqfactor = 4.0 * PI * edensity *", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "as const from scipy.integrate import solve_ivp PI = const.pi KE = 1.0/(4.0*PI*const.epsilon_0) INVKE", "> 1e6*ifreq[i][j]) and (ifreq[i][j] > efreq[i][j]): tfreq[i][j] = 1e6*ifreq[i][j] class Charging: \"\"\" Compute", "= grid_data self.nvols = self.grid_data.nvols self.nchrgs = self.grid_data.nchrgs self.efreq = np.zeros((self.nvols, self.nchrgs)) self.ifreq", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "(ion_energy_from_temperature + 0.5*self.psys.armass*self.ion_velocity*self.ion_velocity) kti = (2.0/3.0)*ion_energy ifreqfactor = 4.0 * PI * idensity", "0.5e-9*diam) for i, diam in enumerate(gdata.dpivots): for j, zcharge in enumerate(gdata.qpivots[gdata.qpivots < 0]):", "i, diam in enumerate(gdata.dpivots): for j, zcharge in 
enumerate(gdata.qpivots[gdata.qpivots < 0]): tfreq[i][j] =", "def tunnel(rtaff, radius, zcharge): \"\"\" Tunneling probability \"\"\" prefac1 = -2./const.hbar prefac2 =", "0] * np.exp(-QE*phid[:, gdata.qpivots > 0]/kti)) for i, diam in enumerate(gdata.dpivots): for j,", "<= 0] * (1.0 - QE*phid[:, gdata.qpivots <= 0]/kti)) ifreq[:, gdata.qpivots > 0]", "in enumerate(gdata.qpivots[gdata.qpivots < 0]): if (tfreq[i][j] > 1e6*ifreq[i][j]) and (ifreq[i][j] > efreq[i][j]): tfreq[i][j]", "= self.rmesh**2 self.phid = coulomb_floatpotential(self.qmesh, self.rmesh) self.ion_velocity = 0.0 def compute_collisionfreq(self, energy, edensity,", "<reponame>caos21/Grodi # Copyright 2019 <NAME> # # Licensed under the Apache License, Version", "enumerate(gdata.dpivots): for j, zcharge in enumerate(gdata.qpivots[gdata.qpivots < 0]): if (tfreq[i][j] > 1e6*ifreq[i][j]) and", "tunnel(rtaff, radius, zcharge): \"\"\" Tunneling probability \"\"\" prefac1 = -2./const.hbar prefac2 = np.sqrt(2.*ME*particle_potenergy(rtaff,", "= \"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Beta\" import numpy as np import", "0: return 1000000.0 else: rtaff[rtaff < 0] = 1000000.0 return rtaff def ptunnel(self,", "zcharge)) return np.exp(prefac1*prefac2*(rtaff*np.arccos(np.sqrt(radius/rtaff)) -np.sqrt(radius*(rtaff-radius)))) class TunnelFrequency: \"\"\" Computes electron tunnel frequency \"\"\" def", "ion_energy = (ion_energy_from_temperature + 0.5*self.psys.armass*self.ion_velocity*self.ion_velocity) kti = (2.0/3.0)*ion_energy ifreqfactor = 4.0 * PI", "0] = (ifreqfactor * rmesh2[:, gdata.qpivots <= 0] * (1.0 - QE*phid[:, gdata.qpivots", "pchem.past_plasmadensity[1] npdensity = growth_data.next_density ion_loss = np.sum(npdensity*charging.ifreq)/nar electron_loss = np.sum(npdensity*charging.efreq)/nel energy_loss = np.sum(charging.coll.phid*npdensity*charging.efreq)/nel", "collision_frequency self.grid_data = grid_data self.nvols = self.grid_data.nvols self.nchrgs = self.grid_data.nchrgs self.efreq = 
np.zeros((self.nvols,", "= collision_frequency self.grid_data = grid_data self.nvols = self.grid_data.nvols self.nchrgs = self.grid_data.nchrgs self.efreq =", "energy, edensity, idensity): \"\"\" Compute frequencies \"\"\" self.coll.compute_collisionfreq(energy, edensity, idensity, self.efreq, self.ifreq, self.tfreq)", "= np.sum(npdensity*charging.efreq)/nel energy_loss = np.sum(charging.coll.phid*npdensity*charging.efreq)/nel tunnel_gain = with_tunnel*np.sum(npdensity*charging.tfreq) energy_gain = with_tunnel*np.sum(charging.coll.phid*npdensity*charging.tfreq) pchem.density_sourcedrain =", "\"\"\" Compute collision frequencies OML theory and Tunnel frequency \"\"\" kte = (2.0/3.0)*energy*QE", "+ 0.5*self.psys.armass*self.ion_velocity*self.ion_velocity) kti = (2.0/3.0)*ion_energy ifreqfactor = 4.0 * PI * idensity *", "self.efreq, self.ifreq, self.tfreq) def compute_plasmacharging(time, delta_t, grid_data, pchem, growth_data, charging, plasma_sys): \"\"\" Solve", "= (ifreqfactor * rmesh2[:, gdata.qpivots > 0] * np.exp(-QE*phid[:, gdata.qpivots > 0]/kti)) for", "efreq.fill(0) ifreq.fill(0) tfreq.fill(0) gdata = self.gdata rmesh2 = self.rmesh2 phid = self.phid efreq[:,", "self.gdata.qpivots*QE, indexing='ij') self.rmesh, self.zmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots, indexing='ij') self.rmesh2 = self.rmesh**2 self.phid =", "= self.tfrequency(zcharge, 0.5e-9*diam) for i, diam in enumerate(gdata.dpivots): for j, zcharge in enumerate(gdata.qpivots[gdata.qpivots", "class Charging: \"\"\" Compute nanoparticle charging rate \"\"\" def __init__(self, collision_frequency, grid_data): \"\"\"", "\"\"\" def __init__(self, collision_frequency, grid_data): \"\"\" \"\"\" self.coll = collision_frequency self.grid_data = grid_data", "= np.zeros((self.nvols, self.nchrgs)) self.ifreq = np.zeros((self.nvols, self.nchrgs)) self.tfreq = np.zeros((self.nvols, self.nchrgs)) def compute_freqs(self,", "Version 2.0 (the \"License\"); # you may not use this file except 
in", "except in compliance with the License. # You may obtain a copy of", "0] * (1.0 + QE*phid[:, gdata.qpivots >= 0]/kte)) ifreq[:, gdata.qpivots <= 0] =", "\"\"\" return KE*qcharge/radius def particle_potenergy(radius, zcharge): \"\"\" Nanoparticle potential energy \"\"\" return -(KE*zcharge*QE**2)/radius", "0] = 1000000.0 return rtaff def ptunnel(self, zcharge, radius): \"\"\" Tunnel frequency \"\"\"", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "# # -*- coding: utf-8 -*- \"\"\" This module contains the classes functions", "* rmesh2[:, gdata.qpivots >= 0] * (1.0 + QE*phid[:, gdata.qpivots >= 0]/kte)) ifreq[:,", "[\"<NAME>\"] __license__ = \"Apache 2.0\" __version__ = \"0.0.1\" __maintainer__ = \"<NAME>\" __email__ =", "j, zcharge in enumerate(gdata.qpivots[gdata.qpivots < 0]): if (tfreq[i][j] > 1e6*ifreq[i][j]) and (ifreq[i][j] >", "pchem.get_system() sol = solve_ivp(plasma_sys, [time, time+delta_t], pchem.past_plasmadensity, method='BDF', dense_output=False, t_eval=[time, time+delta_t]) pchem.next_plasmadensity =", "2019\" __credits__ = [\"<NAME>\"] __license__ = \"Apache 2.0\" __version__ = \"0.0.1\" __maintainer__ =", "rtaff = zcharge/(zcharge/radius + ainf - (5.0/(8.0*radius))) if np.isscalar(rtaff): if rtaff < 0:", "language governing permissions and # limitations under the License. 
# # -*- coding:", "edensity, idensity, self.efreq, self.ifreq, self.tfreq) def compute_plasmacharging(time, delta_t, grid_data, pchem, growth_data, charging, plasma_sys):", "np.sum(npdensity*charging.efreq)/nel energy_loss = np.sum(charging.coll.phid*npdensity*charging.efreq)/nel tunnel_gain = with_tunnel*np.sum(npdensity*charging.tfreq) energy_gain = with_tunnel*np.sum(charging.coll.phid*npdensity*charging.tfreq) pchem.density_sourcedrain = np.array([electron_loss,", "rtaff[rtaff < 0] = 1000000.0 return rtaff def ptunnel(self, zcharge, radius): \"\"\" Tunnel", "* PI * edensity * np.sqrt(kte/(2.0*PI*ME)) ion_energy_from_temperature = (3.0/2.0) * KB * self.psys.ion_temperature", "self.tfrequency(zcharge, 0.5e-9*diam) for i, diam in enumerate(gdata.dpivots): for j, zcharge in enumerate(gdata.qpivots[gdata.qpivots <", "= (-zcharge)*np.sqrt(2.*const.Boltzmann*self.psys.temperature/ME)*(0.5/radius) rtaff = self.rt_affinity(radius, zcharge) return prefac1*tunnel(rtaff, radius, zcharge) class CollisionFrequency: \"\"\"", "coding: utf-8 -*- \"\"\" This module contains the classes functions and helpers to", "= plasmasystem self.gdata = grid_data self.tfrequency = TunnelFrequency(self.psys) self.rmesh, self.qmesh = np.meshgrid(self.gdata.dpivots*0.5e-9, self.gdata.qpivots*QE,", "gdata.qpivots >= 0] = (efreqfactor * rmesh2[:, gdata.qpivots >= 0] * (1.0 +", "npdensity = growth_data.next_density ion_loss = np.sum(npdensity*charging.ifreq)/nar electron_loss = np.sum(npdensity*charging.efreq)/nel energy_loss = np.sum(charging.coll.phid*npdensity*charging.efreq)/nel tunnel_gain", "compute_collisionfreq(self, energy, edensity, idensity, efreq, ifreq, tfreq): \"\"\" Compute collision frequencies OML theory", "rate \"\"\" def __init__(self, collision_frequency, grid_data): \"\"\" \"\"\" self.coll = collision_frequency self.grid_data =", "4.05*const.elementary_charge def __call__(self, zcharge, radius): return self.ptunnel(zcharge, radius) def rt_affinity(self, radius, zcharge): 
\"\"\"", "ifreq, tfreq): \"\"\" Compute collision frequencies OML theory and Tunnel frequency \"\"\" kte", "self.grid_data = grid_data self.nvols = self.grid_data.nvols self.nchrgs = self.grid_data.nchrgs self.efreq = np.zeros((self.nvols, self.nchrgs))", "def __init__(self, collision_frequency, grid_data): \"\"\" \"\"\" self.coll = collision_frequency self.grid_data = grid_data self.nvols", "the specific language governing permissions and # limitations under the License. # #", "def ptunnel(self, zcharge, radius): \"\"\" Tunnel frequency \"\"\" prefac1 = (-zcharge)*np.sqrt(2.*const.Boltzmann*self.psys.temperature/ME)*(0.5/radius) rtaff =", "+ ainf - (5.0/(8.0*radius))) if np.isscalar(rtaff): if rtaff < 0: return 1000000.0 else:", "* np.exp(-QE*phid[:, gdata.qpivots > 0]/kti)) for i, diam in enumerate(gdata.dpivots): for j, zcharge", "compute_plasmacharging(time, delta_t, grid_data, pchem, growth_data, charging, plasma_sys): \"\"\" Solve the plasma densities \"\"\"", "= [\"<NAME>\"] __license__ = \"Apache 2.0\" __version__ = \"0.0.1\" __maintainer__ = \"<NAME>\" __email__", "frequency \"\"\" prefac1 = (-zcharge)*np.sqrt(2.*const.Boltzmann*self.psys.temperature/ME)*(0.5/radius) rtaff = self.rt_affinity(radius, zcharge) return prefac1*tunnel(rtaff, radius, zcharge)", "compute the plasma. \"\"\" __author__ = \"<NAME>\" __copyright__ = \"Copyright 2019\" __credits__ =", "radius): \"\"\" Floating potential \"\"\" return KE*qcharge/radius def particle_potenergy(radius, zcharge): \"\"\" Nanoparticle potential", "KE = 1.0/(4.0*PI*const.epsilon_0) INVKE = 1.0/KE KB = const.Boltzmann QE = const.elementary_charge ME", "functions and helpers to compute the plasma. 
\"\"\" __author__ = \"<NAME>\" __copyright__ =", "radius, zcharge): \"\"\" Tunneling probability \"\"\" prefac1 = -2./const.hbar prefac2 = np.sqrt(2.*ME*particle_potenergy(rtaff, zcharge))", "\"\"\" Floating potential \"\"\" return KE*qcharge/radius def particle_potenergy(radius, zcharge): \"\"\" Nanoparticle potential energy", ">= 0] * (1.0 + QE*phid[:, gdata.qpivots >= 0]/kte)) ifreq[:, gdata.qpivots <= 0]", "self.coll.compute_collisionfreq(energy, edensity, idensity, self.efreq, self.ifreq, self.tfreq) def compute_plasmacharging(time, delta_t, grid_data, pchem, growth_data, charging,", "= np.array([electron_loss, ion_loss, 0.0, 0.0, tunnel_gain, energy_gain, energy_loss]) nano_qdens = np.sum(npdensity*grid_data.qpivots) pchem.nano_qdens =", "Compute collision frequencies OML theory and Tunnel frequency \"\"\" kte = (2.0/3.0)*energy*QE efreqfactor", "gdata = self.gdata rmesh2 = self.rmesh2 phid = self.phid efreq[:, gdata.qpivots < 0]", "1e6*ifreq[i][j]) and (ifreq[i][j] > efreq[i][j]): tfreq[i][j] = 1e6*ifreq[i][j] class Charging: \"\"\" Compute nanoparticle", "self.tfreq = np.zeros((self.nvols, self.nchrgs)) def compute_freqs(self, energy, edensity, idensity): \"\"\" Compute frequencies \"\"\"", "= 0.0 def compute_collisionfreq(self, energy, edensity, idensity, efreq, ifreq, tfreq): \"\"\" Compute collision", "= -2./const.hbar prefac2 = np.sqrt(2.*ME*particle_potenergy(rtaff, zcharge)) return np.exp(prefac1*prefac2*(rtaff*np.arccos(np.sqrt(radius/rtaff)) -np.sqrt(radius*(rtaff-radius)))) class TunnelFrequency: \"\"\" Computes", "grid_data, pchem, growth_data, charging, plasma_sys): \"\"\" Solve the plasma densities \"\"\" with_tunnel =", "= \"<EMAIL>\" __status__ = \"Beta\" import numpy as np import scipy.constants as const", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "0]): tfreq[i][j] = self.tfrequency(zcharge, 0.5e-9*diam) for i, diam in enumerate(gdata.dpivots): for j, zcharge", "\"\"\" \"\"\" self.coll = 
collision_frequency self.grid_data = grid_data self.nvols = self.grid_data.nvols self.nchrgs =", "self.ifreq = np.zeros((self.nvols, self.nchrgs)) self.tfreq = np.zeros((self.nvols, self.nchrgs)) def compute_freqs(self, energy, edensity, idensity):", "= pchem.past_plasmadensity[1] npdensity = growth_data.next_density ion_loss = np.sum(npdensity*charging.ifreq)/nar electron_loss = np.sum(npdensity*charging.efreq)/nel energy_loss =", "def compute_collisionfreq(self, energy, edensity, idensity, efreq, ifreq, tfreq): \"\"\" Compute collision frequencies OML", "return -(KE*zcharge*QE**2)/radius def tunnel(rtaff, radius, zcharge): \"\"\" Tunneling probability \"\"\" prefac1 = -2./const.hbar", "for i, diam in enumerate(gdata.dpivots): for j, zcharge in enumerate(gdata.qpivots[gdata.qpivots < 0]): tfreq[i][j]", "nano_qdens = np.sum(npdensity*grid_data.qpivots) pchem.nano_qdens = nano_qdens nano_qdens_rate = np.sum(growth_data.qrate2d*grid_data.qpivots) pchem.nano_qdens_rate = nano_qdens_rate plasma_sys", "as np import scipy.constants as const from scipy.integrate import solve_ivp PI = const.pi", "self.efreq = np.zeros((self.nvols, self.nchrgs)) self.ifreq = np.zeros((self.nvols, self.nchrgs)) self.tfreq = np.zeros((self.nvols, self.nchrgs)) def", "- (5.0/(8.0*radius))) if np.isscalar(rtaff): if rtaff < 0: return 1000000.0 else: rtaff[rtaff <" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "-*- # # Copyright 2009 <NAME> # # Licensed under the Apache License,", "KIND, either express or implied. # See the License for the specific language", "limitations under the License. \"\"\"Simple hello world application.\"\"\" import google.appengine.api.xmpp import google.appengine.ext.webapp import", "Unless required by applicable law or agreed to in writing, software # distributed", "a XMPP chat.\"\"\" def post(self): \"\"\"Handles post.\"\"\" jid = self.request.get('jid') if google.appengine.api.xmpp.get_presence(jid): google.appengine.api.xmpp.send_invite(jid)", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "handler.\"\"\" def get(self): \"\"\"Handles get.\"\"\" index = google.appengine.ext.webapp.template.render('index.html', {}) self.response.out.write(index) class XMPPHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Handles", "('/invite', InviteHandler), ('/.*', HelloWorldRequestHandler), ], debug=True) def main(): \"\"\"The main function.\"\"\" wsgiref.handlers.CGIHandler().run(app) if", "License. # You may obtain a copy of the License at # #", "HelloWorldRequestHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Simple request handler.\"\"\" def get(self): \"\"\"Handles get.\"\"\" index = google.appengine.ext.webapp.template.render('index.html', {}) self.response.out.write(index)", "get(self): \"\"\"Handles get.\"\"\" index = google.appengine.ext.webapp.template.render('index.html', {}) self.response.out.write(index) class XMPPHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Handles XMPP messages.\"\"\"", "# limitations under the License. 
\"\"\"Simple hello world application.\"\"\" import google.appengine.api.xmpp import google.appengine.ext.webapp", "class XMPPHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Handles XMPP messages.\"\"\" def post(self): \"\"\"Handles post.\"\"\" message = google.appengine.api.xmpp.Message(self.request.POST) logging.info(\"Received", "HelloWorldRequestHandler), ], debug=True) def main(): \"\"\"The main function.\"\"\" wsgiref.handlers.CGIHandler().run(app) if __name__ == '__main__':", "2009 <NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\");", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "logging.info(\"Received XMPP message: %s\" % message.body) if message.body[0:5].lower() == 'hello': message.reply(\"Hi, %s!\" %", "compliance with the License. # You may obtain a copy of the License", "('/.*', HelloWorldRequestHandler), ], debug=True) def main(): \"\"\"The main function.\"\"\" wsgiref.handlers.CGIHandler().run(app) if __name__ ==", "the License. \"\"\"Simple hello world application.\"\"\" import google.appengine.api.xmpp import google.appengine.ext.webapp import google.appengine.ext.webapp.template import", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "this file except in compliance with the License. 
# You may obtain a", "to a XMPP chat.\"\"\" def post(self): \"\"\"Handles post.\"\"\" jid = self.request.get('jid') if google.appengine.api.xmpp.get_presence(jid):", "if message.body[0:5].lower() == 'hello': message.reply(\"Hi, %s!\" % message.sender) class InviteHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Invites one to", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "= google.appengine.ext.webapp.template.render('index.html', {}) self.response.out.write(index) class XMPPHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Handles XMPP messages.\"\"\" def post(self): \"\"\"Handles post.\"\"\"", "you may not use this file except in compliance with the License. #", "and # limitations under the License. \"\"\"Simple hello world application.\"\"\" import google.appengine.api.xmpp import", "{}) self.response.out.write(index) class XMPPHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Handles XMPP messages.\"\"\" def post(self): \"\"\"Handles post.\"\"\" message =", "chat.\"\"\" def post(self): \"\"\"Handles post.\"\"\" jid = self.request.get('jid') if google.appengine.api.xmpp.get_presence(jid): google.appengine.api.xmpp.send_invite(jid) self.redirect('/') app", "utf-8 -*- # # Copyright 2009 <NAME> # # Licensed under the Apache", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "governing permissions and # limitations under the License. \"\"\"Simple hello world application.\"\"\" import", "], debug=True) def main(): \"\"\"The main function.\"\"\" wsgiref.handlers.CGIHandler().run(app) if __name__ == '__main__': main()", "<NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "ANY KIND, either express or implied. 
# See the License for the specific", "InviteHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Invites one to a XMPP chat.\"\"\" def post(self): \"\"\"Handles post.\"\"\" jid =", "post.\"\"\" message = google.appengine.api.xmpp.Message(self.request.POST) logging.info(\"Received XMPP message: %s\" % message.body) if message.body[0:5].lower() ==", "import logging import wsgiref.handlers class HelloWorldRequestHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Simple request handler.\"\"\" def get(self): \"\"\"Handles get.\"\"\"", "language governing permissions and # limitations under the License. \"\"\"Simple hello world application.\"\"\"", "self.redirect('/') app = google.appengine.ext.webapp.WSGIApplication([ ('/_ah/xmpp/message/chat/', XMPPHandler), ('/invite', InviteHandler), ('/.*', HelloWorldRequestHandler), ], debug=True) def", "in compliance with the License. # You may obtain a copy of the", "for the specific language governing permissions and # limitations under the License. \"\"\"Simple", "# -*- coding: utf-8 -*- # # Copyright 2009 <NAME> # # Licensed", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "google.appengine.ext.webapp import google.appengine.ext.webapp.template import logging import wsgiref.handlers class HelloWorldRequestHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Simple request handler.\"\"\" def", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "\"\"\"Handles post.\"\"\" jid = self.request.get('jid') if google.appengine.api.xmpp.get_presence(jid): google.appengine.api.xmpp.send_invite(jid) self.redirect('/') app = google.appengine.ext.webapp.WSGIApplication([ ('/_ah/xmpp/message/chat/',", "use this file except in compliance with the License. 
# You may obtain", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "post(self): \"\"\"Handles post.\"\"\" message = google.appengine.api.xmpp.Message(self.request.POST) logging.info(\"Received XMPP message: %s\" % message.body) if", "permissions and # limitations under the License. \"\"\"Simple hello world application.\"\"\" import google.appengine.api.xmpp", "not use this file except in compliance with the License. # You may", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "class HelloWorldRequestHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Simple request handler.\"\"\" def get(self): \"\"\"Handles get.\"\"\" index = google.appengine.ext.webapp.template.render('index.html', {})", "Copyright 2009 <NAME> # # Licensed under the Apache License, Version 2.0 (the", "See the License for the specific language governing permissions and # limitations under", "\"\"\"Handles get.\"\"\" index = google.appengine.ext.webapp.template.render('index.html', {}) self.response.out.write(index) class XMPPHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Handles XMPP messages.\"\"\" def", "google.appengine.ext.webapp.template import logging import wsgiref.handlers class HelloWorldRequestHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Simple request handler.\"\"\" def get(self): \"\"\"Handles", "google.appengine.api.xmpp.Message(self.request.POST) logging.info(\"Received XMPP message: %s\" % message.body) if message.body[0:5].lower() == 'hello': message.reply(\"Hi, %s!\"", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "%s!\" % message.sender) class InviteHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Invites one to a XMPP chat.\"\"\" def post(self):", "License, Version 2.0 (the \"License\"); # you may not use this file except", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "jid = 
self.request.get('jid') if google.appengine.api.xmpp.get_presence(jid): google.appengine.api.xmpp.send_invite(jid) self.redirect('/') app = google.appengine.ext.webapp.WSGIApplication([ ('/_ah/xmpp/message/chat/', XMPPHandler), ('/invite',", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "under the License. \"\"\"Simple hello world application.\"\"\" import google.appengine.api.xmpp import google.appengine.ext.webapp import google.appengine.ext.webapp.template", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "message: %s\" % message.body) if message.body[0:5].lower() == 'hello': message.reply(\"Hi, %s!\" % message.sender) class", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "# # Copyright 2009 <NAME> # # Licensed under the Apache License, Version", "OF ANY KIND, either express or implied. 
# See the License for the", "hello world application.\"\"\" import google.appengine.api.xmpp import google.appengine.ext.webapp import google.appengine.ext.webapp.template import logging import wsgiref.handlers", "2.0 (the \"License\"); # you may not use this file except in compliance", "# you may not use this file except in compliance with the License.", "self.response.out.write(index) class XMPPHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Handles XMPP messages.\"\"\" def post(self): \"\"\"Handles post.\"\"\" message = google.appengine.api.xmpp.Message(self.request.POST)", "= google.appengine.api.xmpp.Message(self.request.POST) logging.info(\"Received XMPP message: %s\" % message.body) if message.body[0:5].lower() == 'hello': message.reply(\"Hi,", "one to a XMPP chat.\"\"\" def post(self): \"\"\"Handles post.\"\"\" jid = self.request.get('jid') if", "agreed to in writing, software # distributed under the License is distributed on", "import google.appengine.ext.webapp import google.appengine.ext.webapp.template import logging import wsgiref.handlers class HelloWorldRequestHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Simple request handler.\"\"\"", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "app = google.appengine.ext.webapp.WSGIApplication([ ('/_ah/xmpp/message/chat/', XMPPHandler), ('/invite', InviteHandler), ('/.*', HelloWorldRequestHandler), ], debug=True) def main():", "XMPPHandler), ('/invite', InviteHandler), ('/.*', HelloWorldRequestHandler), ], debug=True) def main(): \"\"\"The main function.\"\"\" wsgiref.handlers.CGIHandler().run(app)", "= self.request.get('jid') if google.appengine.api.xmpp.get_presence(jid): google.appengine.api.xmpp.send_invite(jid) self.redirect('/') app = google.appengine.ext.webapp.WSGIApplication([ ('/_ah/xmpp/message/chat/', XMPPHandler), ('/invite', InviteHandler),", "import google.appengine.api.xmpp import google.appengine.ext.webapp import google.appengine.ext.webapp.template import logging import wsgiref.handlers class HelloWorldRequestHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Simple", "License. \"\"\"Simple hello world application.\"\"\" import google.appengine.api.xmpp import google.appengine.ext.webapp import google.appengine.ext.webapp.template import logging", "(the \"License\"); # you may not use this file except in compliance with", "\"\"\"Handles XMPP messages.\"\"\" def post(self): \"\"\"Handles post.\"\"\" message = google.appengine.api.xmpp.Message(self.request.POST) logging.info(\"Received XMPP message:", "index = google.appengine.ext.webapp.template.render('index.html', {}) self.response.out.write(index) class XMPPHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Handles XMPP messages.\"\"\" def post(self): \"\"\"Handles", "the specific language governing permissions and # limitations under the License. \"\"\"Simple hello", "# # Unless required by applicable law or agreed to in writing, software", "<gh_stars>0 # -*- coding: utf-8 -*- # # Copyright 2009 <NAME> # #", "express or implied. 
# See the License for the specific language governing permissions", "\"\"\"Simple hello world application.\"\"\" import google.appengine.api.xmpp import google.appengine.ext.webapp import google.appengine.ext.webapp.template import logging import", "\"\"\"Simple request handler.\"\"\" def get(self): \"\"\"Handles get.\"\"\" index = google.appengine.ext.webapp.template.render('index.html', {}) self.response.out.write(index) class", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "def get(self): \"\"\"Handles get.\"\"\" index = google.appengine.ext.webapp.template.render('index.html', {}) self.response.out.write(index) class XMPPHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Handles XMPP", "except in compliance with the License. # You may obtain a copy of", "google.appengine.api.xmpp.send_invite(jid) self.redirect('/') app = google.appengine.ext.webapp.WSGIApplication([ ('/_ah/xmpp/message/chat/', XMPPHandler), ('/invite', InviteHandler), ('/.*', HelloWorldRequestHandler), ], debug=True)", "by applicable law or agreed to in writing, software # distributed under the", "google.appengine.api.xmpp.get_presence(jid): google.appengine.api.xmpp.send_invite(jid) self.redirect('/') app = google.appengine.ext.webapp.WSGIApplication([ ('/_ah/xmpp/message/chat/', XMPPHandler), ('/invite', InviteHandler), ('/.*', HelloWorldRequestHandler), ],", "def post(self): \"\"\"Handles post.\"\"\" jid = self.request.get('jid') if google.appengine.api.xmpp.get_presence(jid): google.appengine.api.xmpp.send_invite(jid) self.redirect('/') app =", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "# Copyright 2009 <NAME> # # Licensed under the Apache License, Version 2.0", "= google.appengine.ext.webapp.WSGIApplication([ ('/_ah/xmpp/message/chat/', XMPPHandler), ('/invite', InviteHandler), ('/.*', HelloWorldRequestHandler), ], debug=True) 
def main(): \"\"\"The", "either express or implied. # See the License for the specific language governing", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "logging import wsgiref.handlers class HelloWorldRequestHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Simple request handler.\"\"\" def get(self): \"\"\"Handles get.\"\"\" index", "google.appengine.ext.webapp.WSGIApplication([ ('/_ah/xmpp/message/chat/', XMPPHandler), ('/invite', InviteHandler), ('/.*', HelloWorldRequestHandler), ], debug=True) def main(): \"\"\"The main", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "specific language governing permissions and # limitations under the License. \"\"\"Simple hello world", "world application.\"\"\" import google.appengine.api.xmpp import google.appengine.ext.webapp import google.appengine.ext.webapp.template import logging import wsgiref.handlers class", "XMPP message: %s\" % message.body) if message.body[0:5].lower() == 'hello': message.reply(\"Hi, %s!\" % message.sender)", "messages.\"\"\" def post(self): \"\"\"Handles post.\"\"\" message = google.appengine.api.xmpp.Message(self.request.POST) logging.info(\"Received XMPP message: %s\" %", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "\"\"\"Handles post.\"\"\" message = google.appengine.api.xmpp.Message(self.request.POST) logging.info(\"Received XMPP message: %s\" % message.body) if message.body[0:5].lower()", "coding: utf-8 -*- # # Copyright 2009 <NAME> # # Licensed under the", "file except in compliance with the License. 
# You may obtain a copy", "message.sender) class InviteHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Invites one to a XMPP chat.\"\"\" def post(self): \"\"\"Handles post.\"\"\"", "'hello': message.reply(\"Hi, %s!\" % message.sender) class InviteHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Invites one to a XMPP chat.\"\"\"", "XMPP chat.\"\"\" def post(self): \"\"\"Handles post.\"\"\" jid = self.request.get('jid') if google.appengine.api.xmpp.get_presence(jid): google.appengine.api.xmpp.send_invite(jid) self.redirect('/')", "message = google.appengine.api.xmpp.Message(self.request.POST) logging.info(\"Received XMPP message: %s\" % message.body) if message.body[0:5].lower() == 'hello':", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "self.request.get('jid') if google.appengine.api.xmpp.get_presence(jid): google.appengine.api.xmpp.send_invite(jid) self.redirect('/') app = google.appengine.ext.webapp.WSGIApplication([ ('/_ah/xmpp/message/chat/', XMPPHandler), ('/invite', InviteHandler), ('/.*',", "License for the specific language governing permissions and # limitations under the License.", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "get.\"\"\" index = google.appengine.ext.webapp.template.render('index.html', {}) self.response.out.write(index) class XMPPHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Handles XMPP messages.\"\"\" def post(self):", "the License. 
# You may obtain a copy of the License at #", "\"\"\"Invites one to a XMPP chat.\"\"\" def post(self): \"\"\"Handles post.\"\"\" jid = self.request.get('jid')", "to in writing, software # distributed under the License is distributed on an", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "== 'hello': message.reply(\"Hi, %s!\" % message.sender) class InviteHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Invites one to a XMPP", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "implied. # See the License for the specific language governing permissions and #", "google.appengine.api.xmpp import google.appengine.ext.webapp import google.appengine.ext.webapp.template import logging import wsgiref.handlers class HelloWorldRequestHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Simple request", "\"License\"); # you may not use this file except in compliance with the", "def post(self): \"\"\"Handles post.\"\"\" message = google.appengine.api.xmpp.Message(self.request.POST) logging.info(\"Received XMPP message: %s\" % message.body)", "application.\"\"\" import google.appengine.api.xmpp import google.appengine.ext.webapp import google.appengine.ext.webapp.template import logging import wsgiref.handlers class HelloWorldRequestHandler(google.appengine.ext.webapp.RequestHandler):", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "import google.appengine.ext.webapp.template import logging import wsgiref.handlers class HelloWorldRequestHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Simple request handler.\"\"\" def get(self):", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "import wsgiref.handlers class HelloWorldRequestHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Simple request handler.\"\"\" def get(self): \"\"\"Handles get.\"\"\" index =", "required by applicable law or agreed to in writing, 
software # distributed under", "request handler.\"\"\" def get(self): \"\"\"Handles get.\"\"\" index = google.appengine.ext.webapp.template.render('index.html', {}) self.response.out.write(index) class XMPPHandler(google.appengine.ext.webapp.RequestHandler):", "XMPPHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Handles XMPP messages.\"\"\" def post(self): \"\"\"Handles post.\"\"\" message = google.appengine.api.xmpp.Message(self.request.POST) logging.info(\"Received XMPP", "message.body) if message.body[0:5].lower() == 'hello': message.reply(\"Hi, %s!\" % message.sender) class InviteHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Invites one", "% message.sender) class InviteHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Invites one to a XMPP chat.\"\"\" def post(self): \"\"\"Handles", "if google.appengine.api.xmpp.get_presence(jid): google.appengine.api.xmpp.send_invite(jid) self.redirect('/') app = google.appengine.ext.webapp.WSGIApplication([ ('/_ah/xmpp/message/chat/', XMPPHandler), ('/invite', InviteHandler), ('/.*', HelloWorldRequestHandler),", "XMPP messages.\"\"\" def post(self): \"\"\"Handles post.\"\"\" message = google.appengine.api.xmpp.Message(self.request.POST) logging.info(\"Received XMPP message: %s\"", "applicable law or agreed to in writing, software # distributed under the License", "message.body[0:5].lower() == 'hello': message.reply(\"Hi, %s!\" % message.sender) class InviteHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Invites one to a", "%s\" % message.body) if message.body[0:5].lower() == 'hello': message.reply(\"Hi, %s!\" % message.sender) class InviteHandler(google.appengine.ext.webapp.RequestHandler):", "message.reply(\"Hi, %s!\" % message.sender) class InviteHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Invites one to a XMPP chat.\"\"\" def", "post(self): \"\"\"Handles post.\"\"\" jid = self.request.get('jid') if google.appengine.api.xmpp.get_presence(jid): 
google.appengine.api.xmpp.send_invite(jid) self.redirect('/') app = google.appengine.ext.webapp.WSGIApplication([", "('/_ah/xmpp/message/chat/', XMPPHandler), ('/invite', InviteHandler), ('/.*', HelloWorldRequestHandler), ], debug=True) def main(): \"\"\"The main function.\"\"\"", "post.\"\"\" jid = self.request.get('jid') if google.appengine.api.xmpp.get_presence(jid): google.appengine.api.xmpp.send_invite(jid) self.redirect('/') app = google.appengine.ext.webapp.WSGIApplication([ ('/_ah/xmpp/message/chat/', XMPPHandler),", "or agreed to in writing, software # distributed under the License is distributed", "% message.body) if message.body[0:5].lower() == 'hello': message.reply(\"Hi, %s!\" % message.sender) class InviteHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Invites", "InviteHandler), ('/.*', HelloWorldRequestHandler), ], debug=True) def main(): \"\"\"The main function.\"\"\" wsgiref.handlers.CGIHandler().run(app) if __name__", "or implied. # See the License for the specific language governing permissions and", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "google.appengine.ext.webapp.template.render('index.html', {}) self.response.out.write(index) class XMPPHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Handles XMPP messages.\"\"\" def post(self): \"\"\"Handles post.\"\"\" message", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "-*- coding: utf-8 -*- # # Copyright 2009 <NAME> # # Licensed under", "with the License. 
# You may obtain a copy of the License at", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "wsgiref.handlers class HelloWorldRequestHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Simple request handler.\"\"\" def get(self): \"\"\"Handles get.\"\"\" index = google.appengine.ext.webapp.template.render('index.html',", "in writing, software # distributed under the License is distributed on an \"AS", "class InviteHandler(google.appengine.ext.webapp.RequestHandler): \"\"\"Invites one to a XMPP chat.\"\"\" def post(self): \"\"\"Handles post.\"\"\" jid", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
[]
[]
[ "MB s = list(s) i = j = k = 0 # i:", "while still preserving whitespace and initial word order. 1 <= s.length <= 5", "words in s are separated by a single space. \"\"\" import unittest class", "i += 1 k -= 1 i = k = j + 1", "s, reverse the order of characters in each word within a sentence while", "i += 1 k -= 1 else: k = j + 1 j", "in s.split(' ')) def reverseWords_two_pointers(self, s: str) -> str: # Runtime: 148 ms,", "= j + 1 j += 1 return ''.join(s) class TestSolution(unittest.TestCase): methods_to_test =", "and not func.startswith('__')] def test1(self): s = \"Let's take LeetCode contest\" sol =", "= getattr(sol, method) actual_output = method_to_test(s) assert actual_output == expected_output def test2(self): s", "k = 0 # i: start of a word, j: current head location,", "i = j = k = 0 # i: start of a word,", "s: str) -> str: # Runtime: 148 ms, Memory Usage: 15.2 MB s", "= [ func for func in dir(Solution) if callable(getattr(Solution, func)) and not func.startswith('__')]", "Usage: 15.2 MB s = list(s) i = j = k = 0", "while i < k: s[k], s[i] = s[i], s[k] i += 1 k", "j + 1 elif j == len(s) - 1: k = j while", "1 return ''.join(s) class TestSolution(unittest.TestCase): methods_to_test = [ func for func in dir(Solution)", "return ' '.join(w[::-1] for w in s.split(' ')) def reverseWords_two_pointers(self, s: str) ->", "word order. 1 <= s.length <= 5 * 10**4 s contains printable ASCII", "i < k: s[k], s[i] = s[i], s[k] i += 1 k -=", "'.join(w[::-1] for w in s.split(' ')) def reverseWords_two_pointers(self, s: str) -> str: #", "not contain any leading or trailing spaces. 
There is at least one word", "1 j += 1 return ''.join(s) class TestSolution(unittest.TestCase): methods_to_test = [ func for", "callable(getattr(Solution, func)) and not func.startswith('__')] def test1(self): s = \"Let's take LeetCode contest\"", "s[i], s[k] i += 1 k -= 1 i = k = j", "j + 1 j += 1 return ''.join(s) class TestSolution(unittest.TestCase): methods_to_test = [", "w in s.split(' ')) def reverseWords_two_pointers(self, s: str) -> str: # Runtime: 148", "\"Let's take LeetCode contest\" sol = Solution() expected_output = \"s'teL ekat edoCteeL tsetnoc\"", "== expected_output def test2(self): s = \"<NAME>\" sol = Solution() expected_output = \"doG", "1: k = j while i < k: s[k], s[i] = s[i], s[k]", "-> str: # Runtime: 36 ms, Memory Usage: 14.7 MB return ' '.join(w[::-1]", "str) -> str: # Runtime: 148 ms, Memory Usage: 15.2 MB s =", "word within a sentence while still preserving whitespace and initial word order. 1", "def reverseWords(self, s: str) -> str: # Runtime: 36 ms, Memory Usage: 14.7", "= \"<NAME>\" sol = Solution() expected_output = \"doG gniD\" for method in TestSolution.methods_to_test:", "by a single space. \"\"\" import unittest class Solution: def reverseWords(self, s: str)", "')) def reverseWords_two_pointers(self, s: str) -> str: # Runtime: 148 ms, Memory Usage:", "Solution: def reverseWords(self, s: str) -> str: # Runtime: 36 ms, Memory Usage:", "if callable(getattr(Solution, func)) and not func.startswith('__')] def test1(self): s = \"Let's take LeetCode", "5 * 10**4 s contains printable ASCII characters. s does not contain any", "# Runtime: 36 ms, Memory Usage: 14.7 MB return ' '.join(w[::-1] for w", "s[k], s[i] = s[i], s[k] i += 1 k -= 1 else: k", "j == len(s) - 1: k = j while i < k: s[k],", "s.length <= 5 * 10**4 s contains printable ASCII characters. 
s does not", "return ''.join(s) class TestSolution(unittest.TestCase): methods_to_test = [ func for func in dir(Solution) if", "the order of characters in each word within a sentence while still preserving", "== len(s) - 1: k = j while i < k: s[k], s[i]", "s are separated by a single space. \"\"\" import unittest class Solution: def", "j = k = 0 # i: start of a word, j: current", "= k = j + 1 elif j == len(s) - 1: k", "Solution() expected_output = \"s'teL ekat edoCteeL tsetnoc\" for method in TestSolution.methods_to_test: method_to_test =", "still preserving whitespace and initial word order. 1 <= s.length <= 5 *", "= s[i], s[k] i += 1 k -= 1 i = k =", "in s. All the words in s are separated by a single space.", "any leading or trailing spaces. There is at least one word in s.", "1 else: k = j + 1 j += 1 return ''.join(s) class", "unittest class Solution: def reverseWords(self, s: str) -> str: # Runtime: 36 ms,", "+= 1 return ''.join(s) class TestSolution(unittest.TestCase): methods_to_test = [ func for func in", "word in s. All the words in s are separated by a single", "= Solution() expected_output = \"s'teL ekat edoCteeL tsetnoc\" for method in TestSolution.methods_to_test: method_to_test", "= \"Let's take LeetCode contest\" sol = Solution() expected_output = \"s'teL ekat edoCteeL", "s does not contain any leading or trailing spaces. There is at least", "s[i] = s[i], s[k] i += 1 k -= 1 else: k =", "148 ms, Memory Usage: 15.2 MB s = list(s) i = j =", "order. 1 <= s.length <= 5 * 10**4 s contains printable ASCII characters.", "word while j < len(s): if s[j] == ' ': k -= 1", "All the words in s are separated by a single space. 
\"\"\" import", "len(s) - 1: k = j while i < k: s[k], s[i] =", "method_to_test(s) assert actual_output == expected_output def test2(self): s = \"<NAME>\" sol = Solution()", "test2(self): s = \"<NAME>\" sol = Solution() expected_output = \"doG gniD\" for method", "\"s'teL ekat edoCteeL tsetnoc\" for method in TestSolution.methods_to_test: method_to_test = getattr(sol, method) actual_output", "ms, Memory Usage: 14.7 MB return ' '.join(w[::-1] for w in s.split(' '))", "list(s) i = j = k = 0 # i: start of a", "ekat edoCteeL tsetnoc\" for method in TestSolution.methods_to_test: method_to_test = getattr(sol, method) actual_output =", "j < len(s): if s[j] == ' ': k -= 1 while i", "<= s.length <= 5 * 10**4 s contains printable ASCII characters. s does", "' ': k -= 1 while i < k: s[k], s[i] = s[i],", "of a word, j: current head location, k: end of a word while", "contain any leading or trailing spaces. There is at least one word in", "s[i] = s[i], s[k] i += 1 k -= 1 i = k", "str: # Runtime: 148 ms, Memory Usage: 15.2 MB s = list(s) i", "j while i < k: s[k], s[i] = s[i], s[k] i += 1", "methods_to_test = [ func for func in dir(Solution) if callable(getattr(Solution, func)) and not", "in TestSolution.methods_to_test: method_to_test = getattr(sol, method) actual_output = method_to_test(s) assert actual_output == expected_output", "in each word within a sentence while still preserving whitespace and initial word", "s contains printable ASCII characters. 
s does not contain any leading or trailing", "s[j] == ' ': k -= 1 while i < k: s[k], s[i]", "func in dir(Solution) if callable(getattr(Solution, func)) and not func.startswith('__')] def test1(self): s =", "func)) and not func.startswith('__')] def test1(self): s = \"Let's take LeetCode contest\" sol", "def test2(self): s = \"<NAME>\" sol = Solution() expected_output = \"doG gniD\" for", "len(s): if s[j] == ' ': k -= 1 while i < k:", "- 1: k = j while i < k: s[k], s[i] = s[i],", "= j = k = 0 # i: start of a word, j:", "k = j + 1 j += 1 return ''.join(s) class TestSolution(unittest.TestCase): methods_to_test", "separated by a single space. \"\"\" import unittest class Solution: def reverseWords(self, s:", "Memory Usage: 14.7 MB return ' '.join(w[::-1] for w in s.split(' ')) def", "a string s, reverse the order of characters in each word within a", "gniD\" for method in TestSolution.methods_to_test: method_to_test = getattr(sol, method) actual_output = method_to_test(s) assert", "-> str: # Runtime: 148 ms, Memory Usage: 15.2 MB s = list(s)", "s[k] i += 1 k -= 1 i = k = j +", "k = j while i < k: s[k], s[i] = s[i], s[k] i", "for func in dir(Solution) if callable(getattr(Solution, func)) and not func.startswith('__')] def test1(self): s", "k -= 1 else: k = j + 1 j += 1 return", "1 <= s.length <= 5 * 10**4 s contains printable ASCII characters. s", "import unittest class Solution: def reverseWords(self, s: str) -> str: # Runtime: 36", "expected_output def test2(self): s = \"<NAME>\" sol = Solution() expected_output = \"doG gniD\"", "MB return ' '.join(w[::-1] for w in s.split(' ')) def reverseWords_two_pointers(self, s: str)", "Runtime: 36 ms, Memory Usage: 14.7 MB return ' '.join(w[::-1] for w in", "take LeetCode contest\" sol = Solution() expected_output = \"s'teL ekat edoCteeL tsetnoc\" for", "whitespace and initial word order. 
1 <= s.length <= 5 * 10**4 s", "s[k] i += 1 k -= 1 else: k = j + 1", "s = \"Let's take LeetCode contest\" sol = Solution() expected_output = \"s'teL ekat", "for method in TestSolution.methods_to_test: method_to_test = getattr(sol, method) actual_output = method_to_test(s) assert actual_output", "\"\"\" import unittest class Solution: def reverseWords(self, s: str) -> str: # Runtime:", "= Solution() expected_output = \"doG gniD\" for method in TestSolution.methods_to_test: method_to_test = getattr(sol,", "a word, j: current head location, k: end of a word while j", "s: str) -> str: # Runtime: 36 ms, Memory Usage: 14.7 MB return", "< k: s[k], s[i] = s[i], s[k] i += 1 k -= 1", "at least one word in s. All the words in s are separated", "k: s[k], s[i] = s[i], s[k] i += 1 k -= 1 i", "class Solution: def reverseWords(self, s: str) -> str: # Runtime: 36 ms, Memory", "ms, Memory Usage: 15.2 MB s = list(s) i = j = k", "spaces. There is at least one word in s. All the words in", "= list(s) i = j = k = 0 # i: start of", "location, k: end of a word while j < len(s): if s[j] ==", "< len(s): if s[j] == ' ': k -= 1 while i <", "* 10**4 s contains printable ASCII characters. s does not contain any leading", "''.join(s) class TestSolution(unittest.TestCase): methods_to_test = [ func for func in dir(Solution) if callable(getattr(Solution,", "edoCteeL tsetnoc\" for method in TestSolution.methods_to_test: method_to_test = getattr(sol, method) actual_output = method_to_test(s)", "ASCII characters. s does not contain any leading or trailing spaces. 
There is", "1 k -= 1 else: k = j + 1 j += 1", "reverseWords_two_pointers(self, s: str) -> str: # Runtime: 148 ms, Memory Usage: 15.2 MB", "# Runtime: 148 ms, Memory Usage: 15.2 MB s = list(s) i =", "': k -= 1 while i < k: s[k], s[i] = s[i], s[k]", "class TestSolution(unittest.TestCase): methods_to_test = [ func for func in dir(Solution) if callable(getattr(Solution, func))", "dir(Solution) if callable(getattr(Solution, func)) and not func.startswith('__')] def test1(self): s = \"Let's take", "\"<NAME>\" sol = Solution() expected_output = \"doG gniD\" for method in TestSolution.methods_to_test: method_to_test", "= s[i], s[k] i += 1 k -= 1 else: k = j", "word, j: current head location, k: end of a word while j <", "are separated by a single space. \"\"\" import unittest class Solution: def reverseWords(self,", "func for func in dir(Solution) if callable(getattr(Solution, func)) and not func.startswith('__')] def test1(self):", "is at least one word in s. All the words in s are", "elif j == len(s) - 1: k = j while i < k:", "1 elif j == len(s) - 1: k = j while i <", "= j while i < k: s[k], s[i] = s[i], s[k] i +=", "k: s[k], s[i] = s[i], s[k] i += 1 k -= 1 else:", "contest\" sol = Solution() expected_output = \"s'teL ekat edoCteeL tsetnoc\" for method in", "method) actual_output = method_to_test(s) assert actual_output == expected_output def test2(self): s = \"<NAME>\"", "\"doG gniD\" for method in TestSolution.methods_to_test: method_to_test = getattr(sol, method) actual_output = method_to_test(s)", "in dir(Solution) if callable(getattr(Solution, func)) and not func.startswith('__')] def test1(self): s = \"Let's", "single space. \"\"\" import unittest class Solution: def reverseWords(self, s: str) -> str:", "s. All the words in s are separated by a single space. 
\"\"\"", "actual_output = method_to_test(s) assert actual_output == expected_output def test2(self): s = \"<NAME>\" sol", "i: start of a word, j: current head location, k: end of a", "+ 1 elif j == len(s) - 1: k = j while i", "= j + 1 elif j == len(s) - 1: k = j", "sol = Solution() expected_output = \"doG gniD\" for method in TestSolution.methods_to_test: method_to_test =", "<= 5 * 10**4 s contains printable ASCII characters. s does not contain", "else: k = j + 1 j += 1 return ''.join(s) class TestSolution(unittest.TestCase):", "# i: start of a word, j: current head location, k: end of", "least one word in s. All the words in s are separated by", "TestSolution(unittest.TestCase): methods_to_test = [ func for func in dir(Solution) if callable(getattr(Solution, func)) and", "end of a word while j < len(s): if s[j] == ' ':", "Solution() expected_output = \"doG gniD\" for method in TestSolution.methods_to_test: method_to_test = getattr(sol, method)", "within a sentence while still preserving whitespace and initial word order. 1 <=", "the words in s are separated by a single space. \"\"\" import unittest", "[ func for func in dir(Solution) if callable(getattr(Solution, func)) and not func.startswith('__')] def", "if s[j] == ' ': k -= 1 while i < k: s[k],", "+= 1 k -= 1 i = k = j + 1 elif", "assert actual_output == expected_output def test2(self): s = \"<NAME>\" sol = Solution() expected_output", "15.2 MB s = list(s) i = j = k = 0 #", "k -= 1 i = k = j + 1 elif j ==", "while j < len(s): if s[j] == ' ': k -= 1 while", "each word within a sentence while still preserving whitespace and initial word order.", "str) -> str: # Runtime: 36 ms, Memory Usage: 14.7 MB return '", "leading or trailing spaces. There is at least one word in s. 
All", "1 while i < k: s[k], s[i] = s[i], s[k] i += 1", "str: # Runtime: 36 ms, Memory Usage: 14.7 MB return ' '.join(w[::-1] for", "reverse the order of characters in each word within a sentence while still", "sentence while still preserving whitespace and initial word order. 1 <= s.length <=", "string s, reverse the order of characters in each word within a sentence", "one word in s. All the words in s are separated by a", "i = k = j + 1 elif j == len(s) - 1:", "+ 1 j += 1 return ''.join(s) class TestSolution(unittest.TestCase): methods_to_test = [ func", "test1(self): s = \"Let's take LeetCode contest\" sol = Solution() expected_output = \"s'teL", "tsetnoc\" for method in TestSolution.methods_to_test: method_to_test = getattr(sol, method) actual_output = method_to_test(s) assert", "getattr(sol, method) actual_output = method_to_test(s) assert actual_output == expected_output def test2(self): s =", "There is at least one word in s. All the words in s", "order of characters in each word within a sentence while still preserving whitespace", "TestSolution.methods_to_test: method_to_test = getattr(sol, method) actual_output = method_to_test(s) assert actual_output == expected_output def", "def test1(self): s = \"Let's take LeetCode contest\" sol = Solution() expected_output =", "= method_to_test(s) assert actual_output == expected_output def test2(self): s = \"<NAME>\" sol =", "of a word while j < len(s): if s[j] == ' ': k", "head location, k: end of a word while j < len(s): if s[j]", "def reverseWords_two_pointers(self, s: str) -> str: # Runtime: 148 ms, Memory Usage: 15.2", "expected_output = \"doG gniD\" for method in TestSolution.methods_to_test: method_to_test = getattr(sol, method) actual_output", "current head location, k: end of a word while j < len(s): if", "s.split(' ')) def reverseWords_two_pointers(self, s: str) -> str: # Runtime: 148 ms, Memory", "-= 1 i = k = j + 1 elif j == len(s)", "-= 1 while i < k: s[k], s[i] = s[i], s[k] i +=", 
"func.startswith('__')] def test1(self): s = \"Let's take LeetCode contest\" sol = Solution() expected_output", "a single space. \"\"\" import unittest class Solution: def reverseWords(self, s: str) ->", "1 k -= 1 i = k = j + 1 elif j", "s = \"<NAME>\" sol = Solution() expected_output = \"doG gniD\" for method in", "= \"doG gniD\" for method in TestSolution.methods_to_test: method_to_test = getattr(sol, method) actual_output =", "\"\"\" Given a string s, reverse the order of characters in each word", "k -= 1 while i < k: s[k], s[i] = s[i], s[k] i", "-= 1 else: k = j + 1 j += 1 return ''.join(s)", "or trailing spaces. There is at least one word in s. All the", "j: current head location, k: end of a word while j < len(s):", "+= 1 k -= 1 else: k = j + 1 j +=", "Memory Usage: 15.2 MB s = list(s) i = j = k =", "36 ms, Memory Usage: 14.7 MB return ' '.join(w[::-1] for w in s.split('", "s[i], s[k] i += 1 k -= 1 else: k = j +", "method in TestSolution.methods_to_test: method_to_test = getattr(sol, method) actual_output = method_to_test(s) assert actual_output ==", "actual_output == expected_output def test2(self): s = \"<NAME>\" sol = Solution() expected_output =", "Given a string s, reverse the order of characters in each word within", "method_to_test = getattr(sol, method) actual_output = method_to_test(s) assert actual_output == expected_output def test2(self):", "= k = 0 # i: start of a word, j: current head", "space. \"\"\" import unittest class Solution: def reverseWords(self, s: str) -> str: #", "k: end of a word while j < len(s): if s[j] == '", "a word while j < len(s): if s[j] == ' ': k -=", "expected_output = \"s'teL ekat edoCteeL tsetnoc\" for method in TestSolution.methods_to_test: method_to_test = getattr(sol,", "Usage: 14.7 MB return ' '.join(w[::-1] for w in s.split(' ')) def reverseWords_two_pointers(self,", "start of a word, j: current head location, k: end of a word", "in s are separated by a single space. 
\"\"\" import unittest class Solution:", "= \"s'teL ekat edoCteeL tsetnoc\" for method in TestSolution.methods_to_test: method_to_test = getattr(sol, method)", "j += 1 return ''.join(s) class TestSolution(unittest.TestCase): methods_to_test = [ func for func", "and initial word order. 1 <= s.length <= 5 * 10**4 s contains", "1 i = k = j + 1 elif j == len(s) -", "a sentence while still preserving whitespace and initial word order. 1 <= s.length", "printable ASCII characters. s does not contain any leading or trailing spaces. There", "does not contain any leading or trailing spaces. There is at least one", "14.7 MB return ' '.join(w[::-1] for w in s.split(' ')) def reverseWords_two_pointers(self, s:", "for w in s.split(' ')) def reverseWords_two_pointers(self, s: str) -> str: # Runtime:", "reverseWords(self, s: str) -> str: # Runtime: 36 ms, Memory Usage: 14.7 MB", "initial word order. 1 <= s.length <= 5 * 10**4 s contains printable", "== ' ': k -= 1 while i < k: s[k], s[i] =", "k = j + 1 elif j == len(s) - 1: k =", "characters in each word within a sentence while still preserving whitespace and initial", "characters. s does not contain any leading or trailing spaces. There is at", "sol = Solution() expected_output = \"s'teL ekat edoCteeL tsetnoc\" for method in TestSolution.methods_to_test:", "preserving whitespace and initial word order. 1 <= s.length <= 5 * 10**4", "of characters in each word within a sentence while still preserving whitespace and", "LeetCode contest\" sol = Solution() expected_output = \"s'teL ekat edoCteeL tsetnoc\" for method", "= 0 # i: start of a word, j: current head location, k:", "' '.join(w[::-1] for w in s.split(' ')) def reverseWords_two_pointers(self, s: str) -> str:", "contains printable ASCII characters. s does not contain any leading or trailing spaces.", "trailing spaces. There is at least one word in s. All the words", "10**4 s contains printable ASCII characters. 
s does not contain any leading or", "s = list(s) i = j = k = 0 # i: start", "Runtime: 148 ms, Memory Usage: 15.2 MB s = list(s) i = j", "not func.startswith('__')] def test1(self): s = \"Let's take LeetCode contest\" sol = Solution()", "0 # i: start of a word, j: current head location, k: end", "s[k], s[i] = s[i], s[k] i += 1 k -= 1 i =" ]
[ "= sp_typ + f'elodie_{dataset}_{imanum}.fits' path = '' if path is None else path", "= self.BASE + f'c=i&a=mime:application/fits&o=sophie:[{s1d},{seq}]' filename = f'sophie_[{s1d},{seq}].fits' path = '' if path is", "self.BASE + f'n=sophiecc&ob=bjd&a=csv&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno','ccf_offline','maxcpp','lines'] float_cols = ['bjd','rv','err','dvrms','fwhm','span','contrast','sn26'] url, df = _get_df(url_,", "\"\"\" Module core =========== This module retrieves data from ELODIE/SOPHIE archive. It has", "= url_ + str(list(col_dc.keys())).replace(\"'\", \"\").replace(\" \", \"\")[1:-1] req = requests.request('GET', url) r =", "on Elodie FITS files: http://atlas.obs-hp.fr/elodie/500/download.html Help on Sophie FITS files: http://atlas.obs-hp.fr/sophie/spec_help.html \"\"\" import", "'' if path is None else path urlretrieve(url, path+filename) class Sophie: def __init__(self,", "= obj self.BASE = 'http://atlas.obs-hp.fr/elodie/fE.cgi?' def ccf(self): \"\"\" Elodie Cross-Correlation Functions table \"\"\"", "cols = valid_lines[0].split(' ') data_lines = [i.split('\\t') for i in valid_lines[1:]] df =", "= f'sophie_[{s1d},{seq}].fits' path = '' if path is None else path urlretrieve(url, path+filename)", "---------- obj (str) : object name Methods ------- ccf : return Cross-Correlation Functions", "int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def get_spec(self, seq, path=None, s1d=True): s1d =", "df = _get_df(url_, desc_el_spec, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def get_spec(self, dataset,", "print(url.replace('a=csv', 'a=htab')) return df def spec(self): \"\"\" Elodie Spectra table \"\"\" url_ =", "url, df class Elodie: def __init__(self, obj): \"\"\" Elodie class Parameters ---------- obj", "['bjd','rv','err','dvrms','fwhm','span','contrast','sn26'] url, df = _get_df(url_, desc_so_ccf, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def", "Elodie 
Cross-Correlation Functions table \"\"\" url_ = self.BASE + f'n=e501&o={self.obj}&ob=jdb&a=csv&&d=' int_cols = ['datenuit']", "= r.splitlines() valid_lines = [i for i in lines if i[0]!='#'] cols =", "self.BASE + f'o={self.obj}&a=csv&d=' int_cols = ['dataset'] float_cols = ['exptime','sn','vfit','sigfit','ampfit'] url, df = _get_df(url_,", "return url, df class Elodie: def __init__(self, obj): \"\"\" Elodie class Parameters ----------", "Elodie: def __init__(self, obj): \"\"\" Elodie class Parameters ---------- obj (str) : object", "df[int_cols] = df[int_cols].astype(int) return url, df class Elodie: def __init__(self, obj): \"\"\" Elodie", "s1 + '&a=mime:application/x-fits' url = BASE + PAR1+ PAR2 sp_typ = 's1d_' if", "desc_el_ccf, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def spec(self): \"\"\" Elodie Spectra table", "Cross-Correlation Functions table \"\"\" url_ = self.BASE + f'n=sophiecc&ob=bjd&a=csv&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno','ccf_offline','maxcpp','lines'] float_cols", "requests from urllib.request import urlretrieve from .columns import desc_el_ccf, desc_el_spec, desc_so_ccf, desc_so_spec def", "np.nan df[float_cols] = df[float_cols].astype(float) df[int_cols] = df[int_cols].astype(int) return url, df class Elodie: def", "return df def spec(self): \"\"\" Sophie Spectra table \"\"\" url_ = self.BASE +", "df[float_cols] = df[float_cols].astype(float) df[int_cols] = df[int_cols].astype(int) return url, df class Elodie: def __init__(self,", "float_cols) print(url.replace('a=csv', 'a=htab')) return df def spec(self): \"\"\" Sophie Spectra table \"\"\" url_", "self.BASE + f'n=e501&o={self.obj}&ob=jdb&a=csv&&d=' int_cols = ['datenuit'] float_cols = ['jdb','exptim','sn','vfit','sigfit','ampfit','ctefit'] url, df = _get_df(url_,", "FITS files: http://atlas.obs-hp.fr/elodie/500/download.html Help on Sophie FITS files: http://atlas.obs-hp.fr/sophie/spec_help.html \"\"\" import numpy as", "float_cols = 
['bjd','rv','err','dvrms','fwhm','span','contrast','sn26'] url, df = _get_df(url_, desc_so_ccf, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return", "= [i for i in lines if i[0]!='#'] cols = valid_lines[0].split(' ') data_lines", ": return Cross-Correlation Functions table spect : Spectra table \"\"\" self.obj = obj", "def spec(self): \"\"\" Sophie Spectra table \"\"\" url_ = self.BASE + f'n=sophie&a=csv&ob=bjd&c=o&o={self.obj}&d=' int_cols", "+ '&a=mime:application/x-fits' url = BASE + PAR1+ PAR2 sp_typ = 's1d_' if s1d", "requests.request('GET', url) r = req.content.decode('utf-8') lines = r.splitlines() valid_lines = [i for i", "float_cols = ['exptime','sn','vfit','sigfit','ampfit'] url, df = _get_df(url_, desc_el_spec, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return", "__init__(self, obj): \"\"\" Sophie class Parameters ---------- obj (str) : object name Methods", "Parameters ---------- obj (str) : object name Methods ------- ccf : return Cross-Correlation", "f'n=e501&o={self.obj}&ob=jdb&a=csv&&d=' int_cols = ['datenuit'] float_cols = ['jdb','exptim','sn','vfit','sigfit','ampfit','ctefit'] url, df = _get_df(url_, desc_el_ccf, int_cols,", "f'&c=i&o=elodie:{dataset}/{imanum}' PAR2 = s1 + '&a=mime:application/x-fits' url = BASE + PAR1+ PAR2 sp_typ", "= ['seq','sseq','slen','nexp','expno'] float_cols = ['bjd','sn26','exptime'] url, df = _get_df(url_, desc_so_spec, int_cols, float_cols) print(url.replace('a=csv',", "url_ = self.BASE + f'n=sophiecc&ob=bjd&a=csv&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno','ccf_offline','maxcpp','lines'] float_cols = ['bjd','rv','err','dvrms','fwhm','span','contrast','sn26'] url, df", "ELODIE/SOPHIE archive. 
It has two classes, Elodie and Sophie, both could be constructed", "PAR2 = s1 + '&a=mime:application/x-fits' url = BASE + PAR1+ PAR2 sp_typ =", "PAR1 = f'&c=i&o=elodie:{dataset}/{imanum}' PAR2 = s1 + '&a=mime:application/x-fits' url = BASE + PAR1+", "= ['bjd','sn26','exptime'] url, df = _get_df(url_, desc_so_spec, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df", "url, df = _get_df(url_, desc_so_spec, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def get_spec(self,", "self.BASE + f'c=i&a=mime:application/fits&o=sophie:[{s1d},{seq}]' filename = f'sophie_[{s1d},{seq}].fits' path = '' if path is None", "_get_df(url_, desc_el_ccf, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def spec(self): \"\"\" Elodie Spectra", "obj): \"\"\" Sophie class Parameters ---------- obj (str) : object name Methods -------", "= df[float_cols].astype(float) df[int_cols] = df[int_cols].astype(int) return url, df class Elodie: def __init__(self, obj):", "as np import pandas as pd import requests from urllib.request import urlretrieve from", "def spec(self): \"\"\" Elodie Spectra table \"\"\" url_ = self.BASE + f'o={self.obj}&a=csv&d=' int_cols", "obj (str) : object name Methods ------- ccf : return Cross-Correlation Functions table", "Sophie, both could be constructed by passing an object name. Help on Elodie", "Elodie FITS files: http://atlas.obs-hp.fr/elodie/500/download.html Help on Sophie FITS files: http://atlas.obs-hp.fr/sophie/spec_help.html \"\"\" import numpy", "= _get_df(url_, desc_so_spec, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def get_spec(self, seq, path=None,", "= obj self.BASE = 'http://atlas.obs-hp.fr/sophie/sophie.cgi?' 
def ccf(self): \"\"\" Sophie Cross-Correlation Functions table \"\"\"", "path is None else path urlretrieve(url, path+filename) class Sophie: def __init__(self, obj): \"\"\"", "lines = r.splitlines() valid_lines = [i for i in lines if i[0]!='#'] cols", "= 'http://atlas.obs-hp.fr/sophie/sophie.cgi?' def ccf(self): \"\"\" Sophie Cross-Correlation Functions table \"\"\" url_ = self.BASE", "self.BASE = 'http://atlas.obs-hp.fr/elodie/fE.cgi?' def ccf(self): \"\"\" Elodie Cross-Correlation Functions table \"\"\" url_ =", "else 'e2ds' url = self.BASE + f'c=i&a=mime:application/fits&o=sophie:[{s1d},{seq}]' filename = f'sophie_[{s1d},{seq}].fits' path = ''", "'http://atlas.obs-hp.fr/sophie/sophie.cgi?' def ccf(self): \"\"\" Sophie Cross-Correlation Functions table \"\"\" url_ = self.BASE +", "['jdb','exptim','sn','vfit','sigfit','ampfit','ctefit'] url, df = _get_df(url_, desc_el_ccf, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def", "\"\"\" Elodie Spectra table \"\"\" url_ = self.BASE + f'o={self.obj}&a=csv&d=' int_cols = ['dataset']", "= f'&c=i&o=elodie:{dataset}/{imanum}' PAR2 = s1 + '&a=mime:application/x-fits' url = BASE + PAR1+ PAR2", "table spect : Spectra table \"\"\" self.obj = obj self.BASE = 'http://atlas.obs-hp.fr/elodie/fE.cgi?' def", "= '' if path is None else path urlretrieve(url, path+filename) class Sophie: def", "Help on Sophie FITS files: http://atlas.obs-hp.fr/sophie/spec_help.html \"\"\" import numpy as np import pandas", "This module retrieves data from ELODIE/SOPHIE archive. It has two classes, Elodie and", "table spect : Spectra table \"\"\" self.obj = obj self.BASE = 'http://atlas.obs-hp.fr/sophie/sophie.cgi?' def", "PAR1+ PAR2 sp_typ = 's1d_' if s1d else 's2d_' filename = sp_typ +", "df def get_spec(self, seq, path=None, s1d=True): s1d = 's1d' if s1d==True else 'e2ds'", "sp_typ = 's1d_' if s1d else 's2d_' filename = sp_typ + f'elodie_{dataset}_{imanum}.fits' path", "be constructed by passing an object name. 
Help on Elodie FITS files: http://atlas.obs-hp.fr/elodie/500/download.html", "url = url_ + str(list(col_dc.keys())).replace(\"'\", \"\").replace(\" \", \"\")[1:-1] req = requests.request('GET', url) r", "Sophie class Parameters ---------- obj (str) : object name Methods ------- ccf :", "float_cols = ['jdb','exptim','sn','vfit','sigfit','ampfit','ctefit'] url, df = _get_df(url_, desc_el_ccf, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return", "float_cols) print(url.replace('a=csv', 'a=htab')) return df def spec(self): \"\"\" Elodie Spectra table \"\"\" url_", "Spectra table \"\"\" self.obj = obj self.BASE = 'http://atlas.obs-hp.fr/sophie/sophie.cgi?' def ccf(self): \"\"\" Sophie", "self.BASE + f'n=sophie&a=csv&ob=bjd&c=o&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno'] float_cols = ['bjd','sn26','exptime'] url, df = _get_df(url_,", "f'elodie_{dataset}_{imanum}.fits' path = '' if path is None else path urlretrieve(url, path+filename) class", ": object name Methods ------- ccf : return Cross-Correlation Functions table spect :", "= [i.split('\\t') for i in valid_lines[1:]] df = pd.DataFrame(data_lines, columns=cols) for i in", "path=None, s1d=True): s1d = 's1d' if s1d==True else 'e2ds' url = self.BASE +", "print(url.replace('a=csv', 'a=htab')) return df def get_spec(self, dataset, imanum, path=None, s1d=True): BASE = 'http://atlas.obs-hp.fr/elodie/E.cgi?'", "['dataset'] float_cols = ['exptime','sn','vfit','sigfit','ampfit'] url, df = _get_df(url_, desc_el_spec, int_cols, float_cols) print(url.replace('a=csv', 'a=htab'))", "from ELODIE/SOPHIE archive. It has two classes, Elodie and Sophie, both could be", "+ f'elodie_{dataset}_{imanum}.fits' path = '' if path is None else path urlretrieve(url, path+filename)", "def get_spec(self, dataset, imanum, path=None, s1d=True): BASE = 'http://atlas.obs-hp.fr/elodie/E.cgi?' 
s1 = '&z=s1d' if", "np import pandas as pd import requests from urllib.request import urlretrieve from .columns", "Functions table \"\"\" url_ = self.BASE + f'n=e501&o={self.obj}&ob=jdb&a=csv&&d=' int_cols = ['datenuit'] float_cols =", "= self.BASE + f'n=e501&o={self.obj}&ob=jdb&a=csv&&d=' int_cols = ['datenuit'] float_cols = ['jdb','exptim','sn','vfit','sigfit','ampfit','ctefit'] url, df =", "df def get_spec(self, dataset, imanum, path=None, s1d=True): BASE = 'http://atlas.obs-hp.fr/elodie/E.cgi?' s1 = '&z=s1d'", "else '' PAR1 = f'&c=i&o=elodie:{dataset}/{imanum}' PAR2 = s1 + '&a=mime:application/x-fits' url = BASE", "s1d else 's2d_' filename = sp_typ + f'elodie_{dataset}_{imanum}.fits' path = '' if path", "an object name. Help on Elodie FITS files: http://atlas.obs-hp.fr/elodie/500/download.html Help on Sophie FITS", "by passing an object name. Help on Elodie FITS files: http://atlas.obs-hp.fr/elodie/500/download.html Help on", "'e2ds' url = self.BASE + f'c=i&a=mime:application/fits&o=sophie:[{s1d},{seq}]' filename = f'sophie_[{s1d},{seq}].fits' path = '' if", "[i.split('\\t') for i in valid_lines[1:]] df = pd.DataFrame(data_lines, columns=cols) for i in df.columns:", "obj self.BASE = 'http://atlas.obs-hp.fr/elodie/fE.cgi?' def ccf(self): \"\"\" Elodie Cross-Correlation Functions table \"\"\" url_", "+ f'c=i&a=mime:application/fits&o=sophie:[{s1d},{seq}]' filename = f'sophie_[{s1d},{seq}].fits' path = '' if path is None else", "int_cols = ['seq','sseq','slen','nexp','expno','ccf_offline','maxcpp','lines'] float_cols = ['bjd','rv','err','dvrms','fwhm','span','contrast','sn26'] url, df = _get_df(url_, desc_so_ccf, int_cols, float_cols)", "Sophie Cross-Correlation Functions table \"\"\" url_ = self.BASE + f'n=sophiecc&ob=bjd&a=csv&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno','ccf_offline','maxcpp','lines']", "table \"\"\" self.obj = obj self.BASE = 'http://atlas.obs-hp.fr/sophie/sophie.cgi?' 
def ccf(self): \"\"\" Sophie Cross-Correlation", "= ['bjd','rv','err','dvrms','fwhm','span','contrast','sn26'] url, df = _get_df(url_, desc_so_ccf, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df", "\"\"\" url_ = self.BASE + f'n=sophiecc&ob=bjd&a=csv&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno','ccf_offline','maxcpp','lines'] float_cols = ['bjd','rv','err','dvrms','fwhm','span','contrast','sn26'] url,", "'http://atlas.obs-hp.fr/elodie/E.cgi?' s1 = '&z=s1d' if s1d else '' PAR1 = f'&c=i&o=elodie:{dataset}/{imanum}' PAR2 =", "str(list(col_dc.keys())).replace(\"'\", \"\").replace(\" \", \"\")[1:-1] req = requests.request('GET', url) r = req.content.decode('utf-8') lines =", "if s1d==True else 'e2ds' url = self.BASE + f'c=i&a=mime:application/fits&o=sophie:[{s1d},{seq}]' filename = f'sophie_[{s1d},{seq}].fits' path", "['exptime','sn','vfit','sigfit','ampfit'] url, df = _get_df(url_, desc_el_spec, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def", "i in lines if i[0]!='#'] cols = valid_lines[0].split(' ') data_lines = [i.split('\\t') for", "= np.nan df[float_cols] = df[float_cols].astype(float) df[int_cols] = df[int_cols].astype(int) return url, df class Elodie:", "seq, path=None, s1d=True): s1d = 's1d' if s1d==True else 'e2ds' url = self.BASE", "Cross-Correlation Functions table \"\"\" url_ = self.BASE + f'n=e501&o={self.obj}&ob=jdb&a=csv&&d=' int_cols = ['datenuit'] float_cols", "module retrieves data from ELODIE/SOPHIE archive. It has two classes, Elodie and Sophie,", "__init__(self, obj): \"\"\" Elodie class Parameters ---------- obj (str) : object name Methods", "r.splitlines() valid_lines = [i for i in lines if i[0]!='#'] cols = valid_lines[0].split('", "spect : Spectra table \"\"\" self.obj = obj self.BASE = 'http://atlas.obs-hp.fr/elodie/fE.cgi?' 
def ccf(self):", "_get_df(url_, col_dc, int_cols, float_cols): url = url_ + str(list(col_dc.keys())).replace(\"'\", \"\").replace(\" \", \"\")[1:-1] req", "= _get_df(url_, desc_el_ccf, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def spec(self): \"\"\" Elodie", "name. Help on Elodie FITS files: http://atlas.obs-hp.fr/elodie/500/download.html Help on Sophie FITS files: http://atlas.obs-hp.fr/sophie/spec_help.html", "'a=htab')) return df def get_spec(self, seq, path=None, s1d=True): s1d = 's1d' if s1d==True", "Help on Elodie FITS files: http://atlas.obs-hp.fr/elodie/500/download.html Help on Sophie FITS files: http://atlas.obs-hp.fr/sophie/spec_help.html \"\"\"", "int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def spec(self): \"\"\" Elodie Spectra table \"\"\"", "url_ = self.BASE + f'n=sophie&a=csv&ob=bjd&c=o&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno'] float_cols = ['bjd','sn26','exptime'] url, df", "both could be constructed by passing an object name. 
Help on Elodie FITS", "\"\").replace(\" \", \"\")[1:-1] req = requests.request('GET', url) r = req.content.decode('utf-8') lines = r.splitlines()", "path urlretrieve(url, path+filename) class Sophie: def __init__(self, obj): \"\"\" Sophie class Parameters ----------", "int_cols, float_cols): url = url_ + str(list(col_dc.keys())).replace(\"'\", \"\").replace(\" \", \"\")[1:-1] req = requests.request('GET',", "is None else path urlretrieve(url, path+filename) class Sophie: def __init__(self, obj): \"\"\" Sophie", "import urlretrieve from .columns import desc_el_ccf, desc_el_spec, desc_so_ccf, desc_so_spec def _get_df(url_, col_dc, int_cols,", "= BASE + PAR1+ PAR2 sp_typ = 's1d_' if s1d else 's2d_' filename", "table \"\"\" url_ = self.BASE + f'n=sophie&a=csv&ob=bjd&c=o&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno'] float_cols = ['bjd','sn26','exptime']", "in lines if i[0]!='#'] cols = valid_lines[0].split(' ') data_lines = [i.split('\\t') for i", "spec(self): \"\"\" Elodie Spectra table \"\"\" url_ = self.BASE + f'o={self.obj}&a=csv&d=' int_cols =", "') data_lines = [i.split('\\t') for i in valid_lines[1:]] df = pd.DataFrame(data_lines, columns=cols) for", "class Parameters ---------- obj (str) : object name Methods ------- ccf : return", "s1d==True else 'e2ds' url = self.BASE + f'c=i&a=mime:application/fits&o=sophie:[{s1d},{seq}]' filename = f'sophie_[{s1d},{seq}].fits' path =", "url = self.BASE + f'c=i&a=mime:application/fits&o=sophie:[{s1d},{seq}]' filename = f'sophie_[{s1d},{seq}].fits' path = '' if path", "int_cols = ['dataset'] float_cols = ['exptime','sn','vfit','sigfit','ampfit'] url, df = _get_df(url_, desc_el_spec, int_cols, float_cols)", "desc_el_spec, desc_so_ccf, desc_so_spec def _get_df(url_, col_dc, int_cols, float_cols): url = url_ + str(list(col_dc.keys())).replace(\"'\",", "numpy as np import pandas as pd import requests from urllib.request import urlretrieve", "s1d=True): BASE = 'http://atlas.obs-hp.fr/elodie/E.cgi?' 
s1 = '&z=s1d' if s1d else '' PAR1 =", "f'n=sophiecc&ob=bjd&a=csv&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno','ccf_offline','maxcpp','lines'] float_cols = ['bjd','rv','err','dvrms','fwhm','span','contrast','sn26'] url, df = _get_df(url_, desc_so_ccf, int_cols,", "for i in lines if i[0]!='#'] cols = valid_lines[0].split(' ') data_lines = [i.split('\\t')", "def _get_df(url_, col_dc, int_cols, float_cols): url = url_ + str(list(col_dc.keys())).replace(\"'\", \"\").replace(\" \", \"\")[1:-1]", "Elodie and Sophie, both could be constructed by passing an object name. Help", "df def spec(self): \"\"\" Elodie Spectra table \"\"\" url_ = self.BASE + f'o={self.obj}&a=csv&d='", "def __init__(self, obj): \"\"\" Sophie class Parameters ---------- obj (str) : object name", "two classes, Elodie and Sophie, both could be constructed by passing an object", "Spectra table \"\"\" self.obj = obj self.BASE = 'http://atlas.obs-hp.fr/elodie/fE.cgi?' def ccf(self): \"\"\" Elodie", "= 's1d' if s1d==True else 'e2ds' url = self.BASE + f'c=i&a=mime:application/fits&o=sophie:[{s1d},{seq}]' filename =", "col_dc, int_cols, float_cols): url = url_ + str(list(col_dc.keys())).replace(\"'\", \"\").replace(\" \", \"\")[1:-1] req =", "int_cols = ['datenuit'] float_cols = ['jdb','exptim','sn','vfit','sigfit','ampfit','ctefit'] url, df = _get_df(url_, desc_el_ccf, int_cols, float_cols)", "float_cols) print(url.replace('a=csv', 'a=htab')) return df def get_spec(self, seq, path=None, s1d=True): s1d = 's1d'", "f'c=i&a=mime:application/fits&o=sophie:[{s1d},{seq}]' filename = f'sophie_[{s1d},{seq}].fits' path = '' if path is None else path", "desc_so_spec, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def get_spec(self, seq, path=None, s1d=True): s1d", "It has two classes, Elodie and Sophie, both could be constructed by passing", "s1d = 's1d' if s1d==True else 'e2ds' url = self.BASE + f'c=i&a=mime:application/fits&o=sophie:[{s1d},{seq}]' filename", "= 
['seq','sseq','slen','nexp','expno','ccf_offline','maxcpp','lines'] float_cols = ['bjd','rv','err','dvrms','fwhm','span','contrast','sn26'] url, df = _get_df(url_, desc_so_ccf, int_cols, float_cols) print(url.replace('a=csv',", "\"\"\" Elodie class Parameters ---------- obj (str) : object name Methods ------- ccf", "+ str(list(col_dc.keys())).replace(\"'\", \"\").replace(\" \", \"\")[1:-1] req = requests.request('GET', url) r = req.content.decode('utf-8') lines", "= _get_df(url_, desc_so_ccf, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def spec(self): \"\"\" Sophie", "[i for i in lines if i[0]!='#'] cols = valid_lines[0].split(' ') data_lines =", "sp_typ + f'elodie_{dataset}_{imanum}.fits' path = '' if path is None else path urlretrieve(url,", "http://atlas.obs-hp.fr/elodie/500/download.html Help on Sophie FITS files: http://atlas.obs-hp.fr/sophie/spec_help.html \"\"\" import numpy as np import", "= self.BASE + f'n=sophiecc&ob=bjd&a=csv&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno','ccf_offline','maxcpp','lines'] float_cols = ['bjd','rv','err','dvrms','fwhm','span','contrast','sn26'] url, df =", "\"\"\" Sophie class Parameters ---------- obj (str) : object name Methods ------- ccf", "from .columns import desc_el_ccf, desc_el_spec, desc_so_ccf, desc_so_spec def _get_df(url_, col_dc, int_cols, float_cols): url", "= s1 + '&a=mime:application/x-fits' url = BASE + PAR1+ PAR2 sp_typ = 's1d_'", "= df[int_cols].astype(int) return url, df class Elodie: def __init__(self, obj): \"\"\" Elodie class", "= requests.request('GET', url) r = req.content.decode('utf-8') lines = r.splitlines() valid_lines = [i for", "import desc_el_ccf, desc_el_spec, desc_so_ccf, desc_so_spec def _get_df(url_, col_dc, int_cols, float_cols): url = url_", "req = requests.request('GET', url) r = req.content.decode('utf-8') lines = r.splitlines() valid_lines = [i", "df[float_cols].astype(float) df[int_cols] = df[int_cols].astype(int) return url, df class 
Elodie: def __init__(self, obj): \"\"\"", "['seq','sseq','slen','nexp','expno'] float_cols = ['bjd','sn26','exptime'] url, df = _get_df(url_, desc_so_spec, int_cols, float_cols) print(url.replace('a=csv', 'a=htab'))", "------- ccf : return Cross-Correlation Functions table spect : Spectra table \"\"\" self.obj", "s1d=True): s1d = 's1d' if s1d==True else 'e2ds' url = self.BASE + f'c=i&a=mime:application/fits&o=sophie:[{s1d},{seq}]'", "passing an object name. Help on Elodie FITS files: http://atlas.obs-hp.fr/elodie/500/download.html Help on Sophie", "\"\"\" import numpy as np import pandas as pd import requests from urllib.request", "s1d else '' PAR1 = f'&c=i&o=elodie:{dataset}/{imanum}' PAR2 = s1 + '&a=mime:application/x-fits' url =", "else 's2d_' filename = sp_typ + f'elodie_{dataset}_{imanum}.fits' path = '' if path is", "\"\"\" url_ = self.BASE + f'n=e501&o={self.obj}&ob=jdb&a=csv&&d=' int_cols = ['datenuit'] float_cols = ['jdb','exptim','sn','vfit','sigfit','ampfit','ctefit'] url,", "= ['dataset'] float_cols = ['exptime','sn','vfit','sigfit','ampfit'] url, df = _get_df(url_, desc_el_spec, int_cols, float_cols) print(url.replace('a=csv',", "pd.DataFrame(data_lines, columns=cols) for i in df.columns: df.loc[df[i]=='', i] = np.nan df[float_cols] = df[float_cols].astype(float)", "if path is None else path urlretrieve(url, path+filename) class Sophie: def __init__(self, obj):", "import numpy as np import pandas as pd import requests from urllib.request import", "float_cols) print(url.replace('a=csv', 'a=htab')) return df def get_spec(self, dataset, imanum, path=None, s1d=True): BASE =", "valid_lines = [i for i in lines if i[0]!='#'] cols = valid_lines[0].split(' ')", "spect : Spectra table \"\"\" self.obj = obj self.BASE = 'http://atlas.obs-hp.fr/sophie/sophie.cgi?' 
def ccf(self):", "float_cols): url = url_ + str(list(col_dc.keys())).replace(\"'\", \"\").replace(\" \", \"\")[1:-1] req = requests.request('GET', url)", "i[0]!='#'] cols = valid_lines[0].split(' ') data_lines = [i.split('\\t') for i in valid_lines[1:]] df", "f'o={self.obj}&a=csv&d=' int_cols = ['dataset'] float_cols = ['exptime','sn','vfit','sigfit','ampfit'] url, df = _get_df(url_, desc_el_spec, int_cols,", "for i in valid_lines[1:]] df = pd.DataFrame(data_lines, columns=cols) for i in df.columns: df.loc[df[i]=='',", "from urllib.request import urlretrieve from .columns import desc_el_ccf, desc_el_spec, desc_so_ccf, desc_so_spec def _get_df(url_,", "df class Elodie: def __init__(self, obj): \"\"\" Elodie class Parameters ---------- obj (str)", "desc_el_ccf, desc_el_spec, desc_so_ccf, desc_so_spec def _get_df(url_, col_dc, int_cols, float_cols): url = url_ +", "+ f'n=e501&o={self.obj}&ob=jdb&a=csv&&d=' int_cols = ['datenuit'] float_cols = ['jdb','exptim','sn','vfit','sigfit','ampfit','ctefit'] url, df = _get_df(url_, desc_el_ccf,", "'a=htab')) return df def spec(self): \"\"\" Elodie Spectra table \"\"\" url_ = self.BASE", "table \"\"\" self.obj = obj self.BASE = 'http://atlas.obs-hp.fr/elodie/fE.cgi?' def ccf(self): \"\"\" Elodie Cross-Correlation", "url, df = _get_df(url_, desc_so_ccf, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def spec(self):", "print(url.replace('a=csv', 'a=htab')) return df def spec(self): \"\"\" Sophie Spectra table \"\"\" url_ =", "= pd.DataFrame(data_lines, columns=cols) for i in df.columns: df.loc[df[i]=='', i] = np.nan df[float_cols] =", "else path urlretrieve(url, path+filename) class Sophie: def __init__(self, obj): \"\"\" Sophie class Parameters", ": Spectra table \"\"\" self.obj = obj self.BASE = 'http://atlas.obs-hp.fr/elodie/fE.cgi?' def ccf(self): \"\"\"", "Cross-Correlation Functions table spect : Spectra table \"\"\" self.obj = obj self.BASE =", "'http://atlas.obs-hp.fr/elodie/fE.cgi?' 
def ccf(self): \"\"\" Elodie Cross-Correlation Functions table \"\"\" url_ = self.BASE +", "df = _get_df(url_, desc_so_spec, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def get_spec(self, seq,", "def ccf(self): \"\"\" Elodie Cross-Correlation Functions table \"\"\" url_ = self.BASE + f'n=e501&o={self.obj}&ob=jdb&a=csv&&d='", "valid_lines[0].split(' ') data_lines = [i.split('\\t') for i in valid_lines[1:]] df = pd.DataFrame(data_lines, columns=cols)", "ccf(self): \"\"\" Elodie Cross-Correlation Functions table \"\"\" url_ = self.BASE + f'n=e501&o={self.obj}&ob=jdb&a=csv&&d=' int_cols", "= self.BASE + f'n=sophie&a=csv&ob=bjd&c=o&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno'] float_cols = ['bjd','sn26','exptime'] url, df =", "Elodie Spectra table \"\"\" url_ = self.BASE + f'o={self.obj}&a=csv&d=' int_cols = ['dataset'] float_cols", "['seq','sseq','slen','nexp','expno','ccf_offline','maxcpp','lines'] float_cols = ['bjd','rv','err','dvrms','fwhm','span','contrast','sn26'] url, df = _get_df(url_, desc_so_ccf, int_cols, float_cols) print(url.replace('a=csv', 'a=htab'))", "print(url.replace('a=csv', 'a=htab')) return df def get_spec(self, seq, path=None, s1d=True): s1d = 's1d' if", "\"\"\" Elodie Cross-Correlation Functions table \"\"\" url_ = self.BASE + f'n=e501&o={self.obj}&ob=jdb&a=csv&&d=' int_cols =", "Elodie class Parameters ---------- obj (str) : object name Methods ------- ccf :", "df def spec(self): \"\"\" Sophie Spectra table \"\"\" url_ = self.BASE + f'n=sophie&a=csv&ob=bjd&c=o&o={self.obj}&d='", "\"\"\" url_ = self.BASE + f'o={self.obj}&a=csv&d=' int_cols = ['dataset'] float_cols = ['exptime','sn','vfit','sigfit','ampfit'] url,", "if s1d else '' PAR1 = f'&c=i&o=elodie:{dataset}/{imanum}' PAR2 = s1 + '&a=mime:application/x-fits' url", "df = _get_df(url_, desc_el_ccf, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def spec(self): \"\"\"", "url_ = self.BASE + f'o={self.obj}&a=csv&d=' int_cols = 
['dataset'] float_cols = ['exptime','sn','vfit','sigfit','ampfit'] url, df", "path = '' if path is None else path urlretrieve(url, path+filename) class Sophie:", "_get_df(url_, desc_el_spec, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def get_spec(self, dataset, imanum, path=None,", "filename = sp_typ + f'elodie_{dataset}_{imanum}.fits' path = '' if path is None else", "data_lines = [i.split('\\t') for i in valid_lines[1:]] df = pd.DataFrame(data_lines, columns=cols) for i", "urlretrieve(url, path+filename) class Sophie: def __init__(self, obj): \"\"\" Sophie class Parameters ---------- obj", "\"\"\" url_ = self.BASE + f'n=sophie&a=csv&ob=bjd&c=o&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno'] float_cols = ['bjd','sn26','exptime'] url,", "return df def get_spec(self, seq, path=None, s1d=True): s1d = 's1d' if s1d==True else", "= '&z=s1d' if s1d else '' PAR1 = f'&c=i&o=elodie:{dataset}/{imanum}' PAR2 = s1 +", "retrieves data from ELODIE/SOPHIE archive. 
It has two classes, Elodie and Sophie, both", "return Cross-Correlation Functions table spect : Spectra table \"\"\" self.obj = obj self.BASE", "for i in df.columns: df.loc[df[i]=='', i] = np.nan df[float_cols] = df[float_cols].astype(float) df[int_cols] =", "= req.content.decode('utf-8') lines = r.splitlines() valid_lines = [i for i in lines if", "name Methods ------- ccf : return Cross-Correlation Functions table spect : Spectra table", "float_cols = ['bjd','sn26','exptime'] url, df = _get_df(url_, desc_so_spec, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return", "= ['jdb','exptim','sn','vfit','sigfit','ampfit','ctefit'] url, df = _get_df(url_, desc_el_ccf, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df", "urllib.request import urlretrieve from .columns import desc_el_ccf, desc_el_spec, desc_so_ccf, desc_so_spec def _get_df(url_, col_dc,", "'&z=s1d' if s1d else '' PAR1 = f'&c=i&o=elodie:{dataset}/{imanum}' PAR2 = s1 + '&a=mime:application/x-fits'", "i] = np.nan df[float_cols] = df[float_cols].astype(float) df[int_cols] = df[int_cols].astype(int) return url, df class", "= ['exptime','sn','vfit','sigfit','ampfit'] url, df = _get_df(url_, desc_el_spec, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df", "desc_so_ccf, desc_so_spec def _get_df(url_, col_dc, int_cols, float_cols): url = url_ + str(list(col_dc.keys())).replace(\"'\", \"\").replace(\"", "imanum, path=None, s1d=True): BASE = 'http://atlas.obs-hp.fr/elodie/E.cgi?' s1 = '&z=s1d' if s1d else ''", "\"\"\" Sophie Spectra table \"\"\" url_ = self.BASE + f'n=sophie&a=csv&ob=bjd&c=o&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno']", "data from ELODIE/SOPHIE archive. 
It has two classes, Elodie and Sophie, both could", "table \"\"\" url_ = self.BASE + f'n=sophiecc&ob=bjd&a=csv&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno','ccf_offline','maxcpp','lines'] float_cols = ['bjd','rv','err','dvrms','fwhm','span','contrast','sn26']", "i in df.columns: df.loc[df[i]=='', i] = np.nan df[float_cols] = df[float_cols].astype(float) df[int_cols] = df[int_cols].astype(int)", "+ f'n=sophiecc&ob=bjd&a=csv&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno','ccf_offline','maxcpp','lines'] float_cols = ['bjd','rv','err','dvrms','fwhm','span','contrast','sn26'] url, df = _get_df(url_, desc_so_ccf,", "url_ + str(list(col_dc.keys())).replace(\"'\", \"\").replace(\" \", \"\")[1:-1] req = requests.request('GET', url) r = req.content.decode('utf-8')", "path+filename) class Sophie: def __init__(self, obj): \"\"\" Sophie class Parameters ---------- obj (str)", "has two classes, Elodie and Sophie, both could be constructed by passing an", "url_ = self.BASE + f'n=e501&o={self.obj}&ob=jdb&a=csv&&d=' int_cols = ['datenuit'] float_cols = ['jdb','exptim','sn','vfit','sigfit','ampfit','ctefit'] url, df", "'a=htab')) return df def get_spec(self, dataset, imanum, path=None, s1d=True): BASE = 'http://atlas.obs-hp.fr/elodie/E.cgi?' s1", "\"\"\" self.obj = obj self.BASE = 'http://atlas.obs-hp.fr/sophie/sophie.cgi?' def ccf(self): \"\"\" Sophie Cross-Correlation Functions", "= _get_df(url_, desc_el_spec, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def get_spec(self, dataset, imanum,", ".columns import desc_el_ccf, desc_el_spec, desc_so_ccf, desc_so_spec def _get_df(url_, col_dc, int_cols, float_cols): url =", "url, df = _get_df(url_, desc_el_spec, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def get_spec(self,", "Module core =========== This module retrieves data from ELODIE/SOPHIE archive. 
It has two", "url = BASE + PAR1+ PAR2 sp_typ = 's1d_' if s1d else 's2d_'", "object name Methods ------- ccf : return Cross-Correlation Functions table spect : Spectra", "= valid_lines[0].split(' ') data_lines = [i.split('\\t') for i in valid_lines[1:]] df = pd.DataFrame(data_lines,", "'' PAR1 = f'&c=i&o=elodie:{dataset}/{imanum}' PAR2 = s1 + '&a=mime:application/x-fits' url = BASE +", "df = pd.DataFrame(data_lines, columns=cols) for i in df.columns: df.loc[df[i]=='', i] = np.nan df[float_cols]", "= self.BASE + f'o={self.obj}&a=csv&d=' int_cols = ['dataset'] float_cols = ['exptime','sn','vfit','sigfit','ampfit'] url, df =", "int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def spec(self): \"\"\" Sophie Spectra table \"\"\"", "and Sophie, both could be constructed by passing an object name. Help on", "'s2d_' filename = sp_typ + f'elodie_{dataset}_{imanum}.fits' path = '' if path is None", "(str) : object name Methods ------- ccf : return Cross-Correlation Functions table spect", "valid_lines[1:]] df = pd.DataFrame(data_lines, columns=cols) for i in df.columns: df.loc[df[i]=='', i] = np.nan", "+ f'o={self.obj}&a=csv&d=' int_cols = ['dataset'] float_cols = ['exptime','sn','vfit','sigfit','ampfit'] url, df = _get_df(url_, desc_el_spec,", "Spectra table \"\"\" url_ = self.BASE + f'o={self.obj}&a=csv&d=' int_cols = ['dataset'] float_cols =", "= ['datenuit'] float_cols = ['jdb','exptim','sn','vfit','sigfit','ampfit','ctefit'] url, df = _get_df(url_, desc_el_ccf, int_cols, float_cols) print(url.replace('a=csv',", "def get_spec(self, seq, path=None, s1d=True): s1d = 's1d' if s1d==True else 'e2ds' url", "def __init__(self, obj): \"\"\" Elodie class Parameters ---------- obj (str) : object name", "url, df = _get_df(url_, desc_el_ccf, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def spec(self):", "+ PAR1+ PAR2 sp_typ = 's1d_' if s1d else 's2d_' filename = sp_typ", "'a=htab')) return df def spec(self): \"\"\" Sophie Spectra table 
\"\"\" url_ = self.BASE", "return df def spec(self): \"\"\" Elodie Spectra table \"\"\" url_ = self.BASE +", "files: http://atlas.obs-hp.fr/elodie/500/download.html Help on Sophie FITS files: http://atlas.obs-hp.fr/sophie/spec_help.html \"\"\" import numpy as np", "'&a=mime:application/x-fits' url = BASE + PAR1+ PAR2 sp_typ = 's1d_' if s1d else", "Spectra table \"\"\" url_ = self.BASE + f'n=sophie&a=csv&ob=bjd&c=o&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno'] float_cols =", "http://atlas.obs-hp.fr/sophie/spec_help.html \"\"\" import numpy as np import pandas as pd import requests from", "class Elodie: def __init__(self, obj): \"\"\" Elodie class Parameters ---------- obj (str) :", "self.obj = obj self.BASE = 'http://atlas.obs-hp.fr/sophie/sophie.cgi?' def ccf(self): \"\"\" Sophie Cross-Correlation Functions table", "=========== This module retrieves data from ELODIE/SOPHIE archive. It has two classes, Elodie", "= 'http://atlas.obs-hp.fr/elodie/fE.cgi?' def ccf(self): \"\"\" Elodie Cross-Correlation Functions table \"\"\" url_ = self.BASE", "'s1d' if s1d==True else 'e2ds' url = self.BASE + f'c=i&a=mime:application/fits&o=sophie:[{s1d},{seq}]' filename = f'sophie_[{s1d},{seq}].fits'", "['datenuit'] float_cols = ['jdb','exptim','sn','vfit','sigfit','ampfit','ctefit'] url, df = _get_df(url_, desc_el_ccf, int_cols, float_cols) print(url.replace('a=csv', 'a=htab'))", "urlretrieve from .columns import desc_el_ccf, desc_el_spec, desc_so_ccf, desc_so_spec def _get_df(url_, col_dc, int_cols, float_cols):", "Sophie FITS files: http://atlas.obs-hp.fr/sophie/spec_help.html \"\"\" import numpy as np import pandas as pd", "dataset, imanum, path=None, s1d=True): BASE = 'http://atlas.obs-hp.fr/elodie/E.cgi?' 
s1 = '&z=s1d' if s1d else", "f'n=sophie&a=csv&ob=bjd&c=o&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno'] float_cols = ['bjd','sn26','exptime'] url, df = _get_df(url_, desc_so_spec, int_cols,", "def ccf(self): \"\"\" Sophie Cross-Correlation Functions table \"\"\" url_ = self.BASE + f'n=sophiecc&ob=bjd&a=csv&o={self.obj}&d='", "None else path urlretrieve(url, path+filename) class Sophie: def __init__(self, obj): \"\"\" Sophie class", "classes, Elodie and Sophie, both could be constructed by passing an object name.", "BASE = 'http://atlas.obs-hp.fr/elodie/E.cgi?' s1 = '&z=s1d' if s1d else '' PAR1 = f'&c=i&o=elodie:{dataset}/{imanum}'", "'s1d_' if s1d else 's2d_' filename = sp_typ + f'elodie_{dataset}_{imanum}.fits' path = ''", ": Spectra table \"\"\" self.obj = obj self.BASE = 'http://atlas.obs-hp.fr/sophie/sophie.cgi?' def ccf(self): \"\"\"", "get_spec(self, seq, path=None, s1d=True): s1d = 's1d' if s1d==True else 'e2ds' url =", "on Sophie FITS files: http://atlas.obs-hp.fr/sophie/spec_help.html \"\"\" import numpy as np import pandas as", "\"\")[1:-1] req = requests.request('GET', url) r = req.content.decode('utf-8') lines = r.splitlines() valid_lines =", "table \"\"\" url_ = self.BASE + f'o={self.obj}&a=csv&d=' int_cols = ['dataset'] float_cols = ['exptime','sn','vfit','sigfit','ampfit']", "Sophie: def __init__(self, obj): \"\"\" Sophie class Parameters ---------- obj (str) : object", "s1 = '&z=s1d' if s1d else '' PAR1 = f'&c=i&o=elodie:{dataset}/{imanum}' PAR2 = s1", "pandas as pd import requests from urllib.request import urlretrieve from .columns import desc_el_ccf,", "df = _get_df(url_, desc_so_ccf, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def spec(self): \"\"\"", "import requests from urllib.request import urlretrieve from .columns import desc_el_ccf, desc_el_spec, desc_so_ccf, desc_so_spec", "= 'http://atlas.obs-hp.fr/elodie/E.cgi?' 
s1 = '&z=s1d' if s1d else '' PAR1 = f'&c=i&o=elodie:{dataset}/{imanum}' PAR2", "constructed by passing an object name. Help on Elodie FITS files: http://atlas.obs-hp.fr/elodie/500/download.html Help", "desc_so_spec def _get_df(url_, col_dc, int_cols, float_cols): url = url_ + str(list(col_dc.keys())).replace(\"'\", \"\").replace(\" \",", "<filename>stelspec/core.py \"\"\" Module core =========== This module retrieves data from ELODIE/SOPHIE archive. It", "in df.columns: df.loc[df[i]=='', i] = np.nan df[float_cols] = df[float_cols].astype(float) df[int_cols] = df[int_cols].astype(int) return", "Methods ------- ccf : return Cross-Correlation Functions table spect : Spectra table \"\"\"", "as pd import requests from urllib.request import urlretrieve from .columns import desc_el_ccf, desc_el_spec,", "columns=cols) for i in df.columns: df.loc[df[i]=='', i] = np.nan df[float_cols] = df[float_cols].astype(float) df[int_cols]", "url) r = req.content.decode('utf-8') lines = r.splitlines() valid_lines = [i for i in", "obj): \"\"\" Elodie class Parameters ---------- obj (str) : object name Methods -------", "self.BASE = 'http://atlas.obs-hp.fr/sophie/sophie.cgi?' 
def ccf(self): \"\"\" Sophie Cross-Correlation Functions table \"\"\" url_ =", "df[int_cols].astype(int) return url, df class Elodie: def __init__(self, obj): \"\"\" Elodie class Parameters", "files: http://atlas.obs-hp.fr/sophie/spec_help.html \"\"\" import numpy as np import pandas as pd import requests", "\"\"\" Sophie Cross-Correlation Functions table \"\"\" url_ = self.BASE + f'n=sophiecc&ob=bjd&a=csv&o={self.obj}&d=' int_cols =", "class Sophie: def __init__(self, obj): \"\"\" Sophie class Parameters ---------- obj (str) :", "table \"\"\" url_ = self.BASE + f'n=e501&o={self.obj}&ob=jdb&a=csv&&d=' int_cols = ['datenuit'] float_cols = ['jdb','exptim','sn','vfit','sigfit','ampfit','ctefit']", "_get_df(url_, desc_so_ccf, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def spec(self): \"\"\" Sophie Spectra", "in valid_lines[1:]] df = pd.DataFrame(data_lines, columns=cols) for i in df.columns: df.loc[df[i]=='', i] =", "filename = f'sophie_[{s1d},{seq}].fits' path = '' if path is None else path urlretrieve(url,", "r = req.content.decode('utf-8') lines = r.splitlines() valid_lines = [i for i in lines", "Functions table spect : Spectra table \"\"\" self.obj = obj self.BASE = 'http://atlas.obs-hp.fr/elodie/fE.cgi?'", "int_cols = ['seq','sseq','slen','nexp','expno'] float_cols = ['bjd','sn26','exptime'] url, df = _get_df(url_, desc_so_spec, int_cols, float_cols)", "_get_df(url_, desc_so_spec, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def get_spec(self, seq, path=None, s1d=True):", "if s1d else 's2d_' filename = sp_typ + f'elodie_{dataset}_{imanum}.fits' path = '' if", "= 's1d_' if s1d else 's2d_' filename = sp_typ + f'elodie_{dataset}_{imanum}.fits' path =", "Functions table \"\"\" url_ = self.BASE + f'n=sophiecc&ob=bjd&a=csv&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno','ccf_offline','maxcpp','lines'] float_cols =", "spec(self): \"\"\" Sophie Spectra table \"\"\" url_ = self.BASE + 
f'n=sophie&a=csv&ob=bjd&c=o&o={self.obj}&d=' int_cols =", "\", \"\")[1:-1] req = requests.request('GET', url) r = req.content.decode('utf-8') lines = r.splitlines() valid_lines", "Functions table spect : Spectra table \"\"\" self.obj = obj self.BASE = 'http://atlas.obs-hp.fr/sophie/sophie.cgi?'", "i in valid_lines[1:]] df = pd.DataFrame(data_lines, columns=cols) for i in df.columns: df.loc[df[i]=='', i]", "df.columns: df.loc[df[i]=='', i] = np.nan df[float_cols] = df[float_cols].astype(float) df[int_cols] = df[int_cols].astype(int) return url,", "path=None, s1d=True): BASE = 'http://atlas.obs-hp.fr/elodie/E.cgi?' s1 = '&z=s1d' if s1d else '' PAR1", "core =========== This module retrieves data from ELODIE/SOPHIE archive. It has two classes,", "import pandas as pd import requests from urllib.request import urlretrieve from .columns import", "desc_el_spec, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def get_spec(self, dataset, imanum, path=None, s1d=True):", "could be constructed by passing an object name. Help on Elodie FITS files:", "['bjd','sn26','exptime'] url, df = _get_df(url_, desc_so_spec, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def", "PAR2 sp_typ = 's1d_' if s1d else 's2d_' filename = sp_typ + f'elodie_{dataset}_{imanum}.fits'", "df.loc[df[i]=='', i] = np.nan df[float_cols] = df[float_cols].astype(float) df[int_cols] = df[int_cols].astype(int) return url, df", "object name. Help on Elodie FITS files: http://atlas.obs-hp.fr/elodie/500/download.html Help on Sophie FITS files:", "int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def get_spec(self, dataset, imanum, path=None, s1d=True): BASE", "get_spec(self, dataset, imanum, path=None, s1d=True): BASE = 'http://atlas.obs-hp.fr/elodie/E.cgi?' 
s1 = '&z=s1d' if s1d", "desc_so_ccf, int_cols, float_cols) print(url.replace('a=csv', 'a=htab')) return df def spec(self): \"\"\" Sophie Spectra table", "BASE + PAR1+ PAR2 sp_typ = 's1d_' if s1d else 's2d_' filename =", "if i[0]!='#'] cols = valid_lines[0].split(' ') data_lines = [i.split('\\t') for i in valid_lines[1:]]", "ccf(self): \"\"\" Sophie Cross-Correlation Functions table \"\"\" url_ = self.BASE + f'n=sophiecc&ob=bjd&a=csv&o={self.obj}&d=' int_cols", "+ f'n=sophie&a=csv&ob=bjd&c=o&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno'] float_cols = ['bjd','sn26','exptime'] url, df = _get_df(url_, desc_so_spec,", "obj self.BASE = 'http://atlas.obs-hp.fr/sophie/sophie.cgi?' def ccf(self): \"\"\" Sophie Cross-Correlation Functions table \"\"\" url_", "return df def get_spec(self, dataset, imanum, path=None, s1d=True): BASE = 'http://atlas.obs-hp.fr/elodie/E.cgi?' s1 =", "pd import requests from urllib.request import urlretrieve from .columns import desc_el_ccf, desc_el_spec, desc_so_ccf,", "Sophie Spectra table \"\"\" url_ = self.BASE + f'n=sophie&a=csv&ob=bjd&c=o&o={self.obj}&d=' int_cols = ['seq','sseq','slen','nexp','expno'] float_cols", "ccf : return Cross-Correlation Functions table spect : Spectra table \"\"\" self.obj =", "archive. It has two classes, Elodie and Sophie, both could be constructed by", "FITS files: http://atlas.obs-hp.fr/sophie/spec_help.html \"\"\" import numpy as np import pandas as pd import", "self.obj = obj self.BASE = 'http://atlas.obs-hp.fr/elodie/fE.cgi?' def ccf(self): \"\"\" Elodie Cross-Correlation Functions table", "\"\"\" self.obj = obj self.BASE = 'http://atlas.obs-hp.fr/elodie/fE.cgi?' def ccf(self): \"\"\" Elodie Cross-Correlation Functions", "req.content.decode('utf-8') lines = r.splitlines() valid_lines = [i for i in lines if i[0]!='#']", "lines if i[0]!='#'] cols = valid_lines[0].split(' ') data_lines = [i.split('\\t') for i in" ]
[ "DeleteSurvey(self, request, context): \"\"\"Delete a survey and its associated agent (if existent) \"\"\"", "not implemented!') raise NotImplementedError('Method not implemented!') def GetSurveyAnswers(self, request, context): \"\"\"Retrieve answers to", "request_deserializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'ListSurveys': grpc.unary_unary_rpc_method_handler( servicer.ListSurveys, request_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.SerializeToString, ), 'GetSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.FromString,", "ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def DeleteAgentSurvey(request, target,", "grpc.unary_unary_rpc_method_handler( servicer.GetSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'GetAllSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetAllSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'CreateAgentSurvey': grpc.unary_unary_rpc_method_handler(", "survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_SurveysServicer_to_server(servicer, server):", "options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, 
timeout, metadata) @staticmethod def DeleteAgentSurvey(request, target, options=(),", "= channel.unary_unary( '/ondewo.survey.Surveys/UpdateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.DeleteSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, )", "\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def CreateAgentSurvey(self, request, context):", "GetSurvey(self, request, context): \"\"\"Retrieve a Survey message from the Database and return it", "wait_for_ready, timeout, metadata) @staticmethod def UpdateAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None,", "return it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeleteSurvey(self,", "context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetAllSurveyAnswers(self, request, context): \"\"\"Retrieve all", "generic_handler = grpc.method_handlers_generic_handler( 'ondewo.survey.Surveys', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an", "implemented!') def CreateAgentSurvey(self, request, context): \"\"\"Populate and configures an NLU Agent from a", "ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CreateAgentSurvey(request, target,", "timeout=None, 
metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateSurvey', ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression,", "ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CreateAgentSurvey(request,", "def ListSurveys(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request,", "response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.DeleteAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) class SurveysServicer(object): \"\"\"///// Services", "Agent for it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def", "'/ondewo.survey.Surveys/UpdateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.DeleteAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) class SurveysServicer(object):", "SurveysServicer(object): \"\"\"///// Services /////// \"\"\" def 
CreateSurvey(self, request, context): \"\"\"Create a Survey and", "servicer.UpdateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'DeleteSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'ListSurveys': grpc.unary_unary_rpc_method_handler( servicer.ListSurveys,", "channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def UpdateSurvey(request, target, options=(), channel_credentials=None,", "from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from ondewo.survey import survey_pb2 as ondewo_dot_survey_dot_survey__pb2 class", "\"\"\"///// Services /////// \"\"\" def CreateSurvey(self, request, context): \"\"\"Create a Survey and an", "'/ondewo.survey.Surveys/UpdateSurvey', ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def", "options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetSurvey(request, target, options=(),", "CreateAgentSurvey(self, request, context): \"\"\"Populate and configures an NLU Agent from a Survey \"\"\"", "request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) class SurveysServicer(object): \"\"\"///// Services /////// \"\"\" def CreateSurvey(self, request, context):", "NotImplementedError('Method not implemented!') def GetAllSurveyAnswers(self, request, context): \"\"\"Retrieve 
all answers to survey questions", "channel: A grpc.Channel. \"\"\" self.CreateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.GetSurvey =", "the list of all surveys in the server \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!')", "not implemented!') raise NotImplementedError('Method not implemented!') def CreateAgentSurvey(self, request, context): \"\"\"Populate and configures", "compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateSurvey', ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure,", "implemented!') raise NotImplementedError('Method not implemented!') def UpdateSurvey(self, request, context): \"\"\"Update an existing Survey", "grpc.unary_unary_rpc_method_handler( servicer.UpdateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'DeleteAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), } generic_handler", "NLU agent from a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not", "class Surveys(object): \"\"\"///// Services /////// \"\"\" @staticmethod def CreateSurvey(request, target, options=(), channel_credentials=None, call_credentials=None,", "def __init__(self, channel): 
\"\"\"Constructor. Args: channel: A grpc.Channel. \"\"\" self.CreateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateSurvey',", "insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials,", "channel.unary_unary( '/ondewo.survey.Surveys/UpdateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.DeleteSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.ListSurveys", "context): \"\"\"Delete a survey and its associated agent (if existent) \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method", "target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetAllSurveyAnswers',", "server): rpc_method_handlers = { 'CreateSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'GetSurvey': grpc.unary_unary_rpc_method_handler( servicer.GetSurvey,", "'/ondewo.survey.Surveys/GetSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure, 
call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def", "target, '/ondewo.survey.Surveys/GetSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod", "EXPERIMENTAL API. class Surveys(object): \"\"\"///// Services /////// \"\"\" @staticmethod def CreateSurvey(request, target, options=(),", "'DeleteAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'ondewo.survey.Surveys', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,))", "NotImplementedError('Method not implemented!') def UpdateSurvey(self, request, context): \"\"\"Update an existing Survey message from", "request_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.SerializeToString, ), 'GetSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'GetAllSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetAllSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.FromString,", "), } generic_handler = grpc.method_handlers_generic_handler( 'ondewo.survey.Surveys', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part", "response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) 
class SurveysServicer(object): \"\"\"///// Services /////// \"\"\" def CreateSurvey(self, request, context): \"\"\"Create", "any session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def CreateAgentSurvey(self,", "grpc.Channel. \"\"\" self.CreateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.GetSurvey = channel.unary_unary( '/ondewo.survey.Surveys/GetSurvey',", "\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeleteSurvey(self, request, context):", "), 'UpdateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'DeleteAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ),", "} generic_handler = grpc.method_handlers_generic_handler( 'ondewo.survey.Surveys', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of", "list of all surveys in the server \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise", "collected in interactions with a survey agent in any session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method", "def DeleteAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, 
compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request,", "servicer.UpdateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'DeleteAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), } generic_handler =", "server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class Surveys(object): \"\"\"/////", "message from the Database and return it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise", "def CreateSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request,", "as google_dot_protobuf_dot_empty__pb2 from ondewo.survey import survey_pb2 as ondewo_dot_survey_dot_survey__pb2 class SurveysStub(object): \"\"\"///// Services ///////", "and an empty NLU Agent for it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise", "context): \"\"\"Retrieve answers to survey questions collected in interactions with a survey agent", ") self.CreateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.UpdateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString,", 
"DeleteAgentSurvey(self, request, context): \"\"\"Deletes all data of an NLU agent associated to a", "wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurvey', ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials,", "context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def UpdateSurvey(self, request, context): \"\"\"Update an", "request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'GetAllSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetAllSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'CreateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString,", "options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def ListSurveys(request, target, options=(),", "not implemented!') raise NotImplementedError('Method not implemented!') def DeleteSurvey(self, request, context): \"\"\"Delete a survey", "a Survey message from the Database and return it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not", "request, context): \"\"\"Returns the list of all surveys in the server \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED)", "'CreateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, 
response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'UpdateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'DeleteAgentSurvey':", "ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def UpdateAgentSurvey(request,", "Services /////// \"\"\" def __init__(self, channel): \"\"\"Constructor. Args: channel: A grpc.Channel. \"\"\" self.CreateSurvey", "request, context): \"\"\"Populate and configures an NLU Agent from a Survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED)", "\"\"\"///// Services /////// \"\"\" @staticmethod def CreateSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None,", "target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateSurvey',", "'/ondewo.survey.Surveys/GetAllSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.CreateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.UpdateAgentSurvey =", "call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def 
DeleteSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False,", "request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'UpdateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'DeleteAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString,", "insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials,", "context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeleteSurvey(self, request, context): \"\"\"Delete a", "'/ondewo.survey.Surveys/ListSurveys', request_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, ) self.GetSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.GetAllSurveyAnswers =", "@staticmethod def CreateSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return", 
"self.DeleteAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) class SurveysServicer(object): \"\"\"///// Services /////// \"\"\"", "NLU Agent from a Survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not", "= channel.unary_unary( '/ondewo.survey.Surveys/UpdateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.DeleteAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, )", "This class is part of an EXPERIMENTAL API. 
class Surveys(object): \"\"\"///// Services ///////", "to protobuf-defined services.\"\"\" import grpc from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from ondewo.survey", "compression, wait_for_ready, timeout, metadata) @staticmethod def CreateAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None,", "channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString,", "of an NLU agent associated to a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!')", "grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)", "self.CreateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.GetSurvey = channel.unary_unary( '/ondewo.survey.Surveys/GetSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString,", "implemented!') raise NotImplementedError('Method not implemented!') def CreateAgentSurvey(self, request, context): \"\"\"Populate and configures an", "context): \"\"\"Retrieve all answers to survey questions collected in interactions with a 
survey", "raise NotImplementedError('Method not implemented!') def GetAllSurveyAnswers(self, request, context): \"\"\"Retrieve all answers to survey", "Survey message from the Database and return it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!')", "call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetSurveyAnswers(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False,", "insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def UpdateSurvey(request, target, options=(), channel_credentials=None, call_credentials=None,", "\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeleteAgentSurvey(self, request, context):", "an NLU Agent from a Survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method", "compression, wait_for_ready, timeout, metadata) @staticmethod def UpdateAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None,", "plugin. DO NOT EDIT! 
\"\"\"Client and server classes corresponding to protobuf-defined services.\"\"\" import", "'/ondewo.survey.Surveys/CreateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.GetSurvey = channel.unary_unary( '/ondewo.survey.Surveys/GetSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.UpdateSurvey =", "import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from ondewo.survey import survey_pb2 as ondewo_dot_survey_dot_survey__pb2 class SurveysStub(object): \"\"\"/////", "interactions with a survey agent for a specific session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not", "and its associated agent (if existent) \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method", "= channel.unary_unary( '/ondewo.survey.Surveys/GetSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.UpdateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, )", "it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetSurvey(self, request,", "a Survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def UpdateAgentSurvey(self,", "from a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) 
context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def", "servicer.CreateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'GetSurvey': grpc.unary_unary_rpc_method_handler( servicer.GetSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'UpdateSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateSurvey,", "ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetSurvey(request,", "(if existent) \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ListSurveys(self,", "options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetAllSurveyAnswers(request, target, options=(),", "implemented!') def DeleteAgentSurvey(self, request, context): \"\"\"Deletes all data of an NLU agent associated", "grpc.unary_unary_rpc_method_handler( servicer.GetAllSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'CreateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'UpdateAgentSurvey': grpc.unary_unary_rpc_method_handler(", "options, channel_credentials, 
insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetSurveyAnswers(request, target, options=(),", "GetSurveyAnswers(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target,", "'/ondewo.survey.Surveys/UpdateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.DeleteSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.ListSurveys =", "compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/ListSurveys', ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, options, channel_credentials, insecure,", ") self.UpdateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.DeleteAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString,", "Database and return it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')", "ListSurveys(self, request, context): \"\"\"Returns the list of all surveys in the server \"\"\"", "call_credentials=None, insecure=False, compression=None, 
wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurvey', ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options,", "all answers to survey questions collected in interactions with a survey agent in", "{ 'CreateSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'GetSurvey': grpc.unary_unary_rpc_method_handler( servicer.GetSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ),", "), 'DeleteAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'ondewo.survey.Surveys', rpc_method_handlers)", "response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'ondewo.survey.Surveys', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is", "call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateSurvey', ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options,", "'ondewo.survey.Surveys', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. 
class", "target, '/ondewo.survey.Surveys/GetAllSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod", "request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'ondewo.survey.Surveys', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class", "from ondewo.survey import survey_pb2 as ondewo_dot_survey_dot_survey__pb2 class SurveysStub(object): \"\"\"///// Services /////// \"\"\" def", "configures an NLU Agent from a Survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise", "not implemented!') raise NotImplementedError('Method not implemented!') def UpdateAgentSurvey(self, request, context): \"\"\"Update an NLU", "'UpdateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'DeleteAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), }", "= { 'CreateSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'GetSurvey': grpc.unary_unary_rpc_method_handler( servicer.GetSurvey, 
request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString,", "'/ondewo.survey.Surveys/GetSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.GetAllSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetAllSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.CreateAgentSurvey =", "not implemented!') def DeleteSurvey(self, request, context): \"\"\"Delete a survey and its associated agent", ") self.DeleteAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) class SurveysServicer(object): \"\"\"///// Services ///////", "options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString,", "@staticmethod def GetAllSurveyAnswers(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return", "a survey agent for a specific session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise", "def GetAllSurveyAnswers(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, 
metadata=None): return grpc.experimental.unary_unary(request,", "answers to survey questions collected in interactions with a survey agent in any", "request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.UpdateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.DeleteAgentSurvey = channel.unary_unary(", "as ondewo_dot_survey_dot_survey__pb2 class SurveysStub(object): \"\"\"///// Services /////// \"\"\" def __init__(self, channel): \"\"\"Constructor. Args:", "data of an NLU agent associated to a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not", "empty NLU Agent for it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not", "= channel.unary_unary( '/ondewo.survey.Surveys/CreateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.UpdateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, )", "wait_for_ready, timeout, metadata) @staticmethod def GetAllSurveyAnswers(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None,", "a survey and its associated agent (if existent) \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!')", "\"\"\" 
@staticmethod def CreateSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):", "channel.unary_unary( '/ondewo.survey.Surveys/GetSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.UpdateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.DeleteSurvey", "self.UpdateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.DeleteAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,", "with a survey agent for a specific session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!')", "of all surveys in the server \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method", "options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString,", "\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not 
implemented!') def GetSurvey(self, request, context):", "metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateSurvey', ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready,", "protobuf-defined services.\"\"\" import grpc from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from ondewo.survey import", "timeout, metadata) @staticmethod def DeleteAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None,", "\"\"\"Retrieve answers to survey questions collected in interactions with a survey agent for", "implemented!') raise NotImplementedError('Method not implemented!') def add_SurveysServicer_to_server(servicer, server): rpc_method_handlers = { 'CreateSurvey': grpc.unary_unary_rpc_method_handler(", "channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def DeleteAgentSurvey(request, target, options=(), channel_credentials=None,", "NotImplementedError('Method not implemented!') def GetSurvey(self, request, context): \"\"\"Retrieve a Survey message from the", "call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetAllSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options,", "DeleteAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target,", "\"\"\"Create a Survey and an empty NLU 
Agent for it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method", "survey agent for a specific session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method", "insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteSurvey', ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials,", "implemented!') raise NotImplementedError('Method not implemented!') def UpdateAgentSurvey(self, request, context): \"\"\"Update an NLU agent", "class is part of an EXPERIMENTAL API. class Surveys(object): \"\"\"///// Services /////// \"\"\"", "ondewo_dot_survey_dot_survey__pb2 class SurveysStub(object): \"\"\"///// Services /////// \"\"\" def __init__(self, channel): \"\"\"Constructor. Args: channel:", "channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetSurvey(request, target, options=(), channel_credentials=None,", "'/ondewo.survey.Surveys/UpdateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def", "options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString,", "surveys in the server \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not 
implemented!')", "grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/ListSurveys', ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)", "request_serializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.DeleteSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.ListSurveys = channel.unary_unary(", "request, context): \"\"\"Delete a survey and its associated agent (if existent) \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED)", "gRPC Python protocol compiler plugin. DO NOT EDIT! \"\"\"Client and server classes corresponding", "agent for a specific session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not", "self.GetSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.GetAllSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetAllSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString,", "of an EXPERIMENTAL API. 
class Surveys(object): \"\"\"///// Services /////// \"\"\" @staticmethod def CreateSurvey(request,", "\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetSurveyAnswers(self, request, context):", "context): \"\"\"Returns the list of all surveys in the server \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method", "not implemented!') raise NotImplementedError('Method not implemented!') def ListSurveys(self, request, context): \"\"\"Returns the list", "implemented!') raise NotImplementedError('Method not implemented!') def DeleteAgentSurvey(self, request, context): \"\"\"Deletes all data of", "Survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def UpdateAgentSurvey(self, request,", "ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def DeleteSurvey(request, target,", "request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.DeleteAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) class SurveysServicer(object): \"\"\"/////", "insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def DeleteSurvey(request, target, options=(), channel_credentials=None, call_credentials=None,", "questions collected in interactions with a survey agent for a specific session \"\"\"", "metadata=None): return grpc.experimental.unary_unary(request, target, 
'/ondewo.survey.Surveys/GetSurvey', ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready,", "CreateSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target,", "wait_for_ready, timeout, metadata) @staticmethod def UpdateSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None,", "response_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.SerializeToString, ), 'GetSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'GetAllSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetAllSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString,", "return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout,", "Agent from a Survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')", "to a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not 
implemented!') def", "def UpdateSurvey(self, request, context): \"\"\"Update an existing Survey message from the Database and", "'GetSurvey': grpc.unary_unary_rpc_method_handler( servicer.GetSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'UpdateSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'DeleteSurvey':", "__init__(self, channel): \"\"\"Constructor. Args: channel: A grpc.Channel. \"\"\" self.CreateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString,", "implemented!') def add_SurveysServicer_to_server(servicer, server): rpc_method_handlers = { 'CreateSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ),", "context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ListSurveys(self, request, context): \"\"\"Returns the", "implemented!') raise NotImplementedError('Method not implemented!') def GetSurveyAnswers(self, request, context): \"\"\"Retrieve answers to survey", "\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetAllSurveyAnswers(self, request, context):", "rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. 
class Surveys(object):", "raise NotImplementedError('Method not implemented!') def CreateAgentSurvey(self, request, context): \"\"\"Populate and configures an NLU", "request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'DeleteAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler(", "timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteSurvey', ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure, call_credentials, compression,", "ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def ListSurveys(request,", "request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'UpdateSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'DeleteSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.FromString,", "= channel.unary_unary( '/ondewo.survey.Surveys/GetSurveyAnswers', 
request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.GetAllSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetAllSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, )", "google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def ListSurveys(request, target,", "timeout, metadata) @staticmethod def CreateAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None,", "'ListSurveys': grpc.unary_unary_rpc_method_handler( servicer.ListSurveys, request_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.SerializeToString, ), 'GetSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'GetAllSurveyAnswers':", "wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure, call_credentials,", ") class SurveysServicer(object): \"\"\"///// Services /////// \"\"\" def CreateSurvey(self, request, context): \"\"\"Create a", "context.set_code(grpc.StatusCode.UNIMPLEMENTED) 
context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def UpdateAgentSurvey(self, request, context): \"\"\"Update", "the Database and return it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not", "return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout,", "request, context): \"\"\"Update an NLU agent from a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not", "insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials,", "by the gRPC Python protocol compiler plugin. DO NOT EDIT! \"\"\"Client and server", "\"\"\" def CreateSurvey(self, request, context): \"\"\"Create a Survey and an empty NLU Agent", "return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetAllSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout,", "context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetAllSurveyAnswers(self, request, context): \"\"\"Retrieve", "protocol compiler plugin. DO NOT EDIT! 
\"\"\"Client and server classes corresponding to protobuf-defined", "grpc.unary_unary_rpc_method_handler( servicer.CreateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'UpdateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'DeleteAgentSurvey': grpc.unary_unary_rpc_method_handler(", "channel.unary_unary( '/ondewo.survey.Surveys/ListSurveys', request_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, ) self.GetSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.GetAllSurveyAnswers", "\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ListSurveys(self, request, context):", "its associated agent (if existent) \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not", "all surveys in the server \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not", "answers to survey questions collected in interactions with a survey agent for a", "survey questions collected in interactions with a survey agent for a specific session", "implemented!') def ListSurveys(self, request, context): \"\"\"Returns the list of all 
surveys in the", "= grpc.method_handlers_generic_handler( 'ondewo.survey.Surveys', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL", ") self.ListSurveys = channel.unary_unary( '/ondewo.survey.Surveys/ListSurveys', request_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, ) self.GetSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString,", "to survey questions collected in interactions with a survey agent in any session", "context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ListSurveys(self, request, context): \"\"\"Returns", "compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurvey', ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure,", "timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure, call_credentials, compression,", "insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateSurvey', ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials,", "timeout=None, metadata=None): return 
grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure, call_credentials, compression,", "self.ListSurveys = channel.unary_unary( '/ondewo.survey.Surveys/ListSurveys', request_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, ) self.GetSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString,", "response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'GetSurvey': grpc.unary_unary_rpc_method_handler( servicer.GetSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'UpdateSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString,", "metadata) @staticmethod def UpdateAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):", "implemented!') def UpdateSurvey(self, request, context): \"\"\"Update an existing Survey message from the Database", "Services /////// \"\"\" @staticmethod def CreateSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None,", "channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, 
metadata) @staticmethod def UpdateAgentSurvey(request, target, options=(), channel_credentials=None,", "), 'DeleteSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'ListSurveys': grpc.unary_unary_rpc_method_handler( servicer.ListSurveys, request_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.SerializeToString, ),", "NotImplementedError('Method not implemented!') def GetSurveyAnswers(self, request, context): \"\"\"Retrieve answers to survey questions collected", "call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options,", "context): \"\"\"Retrieve a Survey message from the Database and return it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED)", "request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.GetAllSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetAllSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.CreateAgentSurvey = channel.unary_unary(", "def CreateAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request,", 
"timeout, metadata) @staticmethod def GetSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None,", "response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.DeleteSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.ListSurveys = channel.unary_unary( '/ondewo.survey.Surveys/ListSurveys',", "grpc.method_handlers_generic_handler( 'ondewo.survey.Surveys', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API.", "timeout, metadata) @staticmethod def GetSurveyAnswers(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None,", "it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeleteSurvey(self, request,", "@staticmethod def GetSurveyAnswers(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return", "target, '/ondewo.survey.Surveys/CreateSurvey', ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod", "target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateAgentSurvey',", "target, '/ondewo.survey.Surveys/GetSurvey', 
ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod", "options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CreateAgentSurvey(request, target, options=(),", "\"\"\"Populate and configures an NLU Agent from a Survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not", "metadata) @staticmethod def GetSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):", "server classes corresponding to protobuf-defined services.\"\"\" import grpc from google.protobuf import empty_pb2 as", "in interactions with a survey agent for a specific session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method", "to survey questions collected in interactions with a survey agent for a specific", "a survey agent in any session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method", "DeleteSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target,", "'GetAllSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetAllSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'CreateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, 
response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'UpdateAgentSurvey':", "metadata) @staticmethod def CreateAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):", "'/ondewo.survey.Surveys/GetSurvey', ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def", "raise NotImplementedError('Method not implemented!') def ListSurveys(self, request, context): \"\"\"Returns the list of all", "def UpdateAgentSurvey(self, request, context): \"\"\"Update an NLU agent from a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED)", "servicer.GetSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'GetAllSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetAllSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'CreateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateAgentSurvey,", "wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetAllSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure, call_credentials,", "), 'ListSurveys': grpc.unary_unary_rpc_method_handler( servicer.ListSurveys, request_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.FromString, 
response_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.SerializeToString, ), 'GetSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ),", "channel): \"\"\"Constructor. Args: channel: A grpc.Channel. \"\"\" self.CreateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString,", "options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateSurvey', ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString,", "def CreateSurvey(self, request, context): \"\"\"Create a Survey and an empty NLU Agent for", "not implemented!') raise NotImplementedError('Method not implemented!') def GetAllSurveyAnswers(self, request, context): \"\"\"Retrieve all answers", "metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready,", "def GetSurveyAnswers(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request,", "\"\"\"Client and server classes corresponding to protobuf-defined services.\"\"\" import grpc from google.protobuf import", "context.set_code(grpc.StatusCode.UNIMPLEMENTED) 
context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetSurvey(self, request, context): \"\"\"Retrieve", "'/ondewo.survey.Surveys/GetAllSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def", "not implemented!') def UpdateAgentSurvey(self, request, context): \"\"\"Update an NLU agent from a survey", "request, context): \"\"\"Update an existing Survey message from the Database and return it", "associated agent (if existent) \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')", "\"\"\"Retrieve a Survey message from the Database and return it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method", "return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateSurvey', ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout,", "context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def UpdateSurvey(self, request, context): \"\"\"Update", "= channel.unary_unary( '/ondewo.survey.Surveys/ListSurveys', request_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, ) self.GetSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, 
response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, )", "channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def ListSurveys(request, target, options=(), channel_credentials=None,", "insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetSurveyAnswers(request, target, options=(), channel_credentials=None, call_credentials=None,", "an EXPERIMENTAL API. class Surveys(object): \"\"\"///// Services /////// \"\"\" @staticmethod def CreateSurvey(request, target,", "NotImplementedError('Method not implemented!') def DeleteAgentSurvey(self, request, context): \"\"\"Deletes all data of an NLU", "insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateSurvey', ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials,", "an empty NLU Agent for it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method", "all data of an NLU agent associated to a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method", "request, context): \"\"\"Create a Survey and an empty NLU Agent for it \"\"\"", "\"\"\"Update an existing Survey message from the Database and return it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED)", "response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'DeleteAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 
'ondewo.survey.Surveys',", "request_serializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.ListSurveys = channel.unary_unary( '/ondewo.survey.Surveys/ListSurveys', request_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, ) self.GetSurveyAnswers = channel.unary_unary(", "request, context): \"\"\"Deletes all data of an NLU agent associated to a survey", "ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def UpdateSurvey(request,", "def CreateAgentSurvey(self, request, context): \"\"\"Populate and configures an NLU Agent from a Survey", "UpdateAgentSurvey(self, request, context): \"\"\"Update an NLU agent from a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method", "target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteSurvey',", "channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateSurvey', ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString,", "context): \"\"\"Update an existing Survey message from the Database and return it \"\"\"", "wait_for_ready, timeout, metadata) @staticmethod def CreateAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, 
compression=None, wait_for_ready=None,", "channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteSurvey', ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString,", "ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetSurveyAnswers(request,", "channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetAllSurveyAnswers(request, target, options=(), channel_credentials=None,", "corresponding to protobuf-defined services.\"\"\" import grpc from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from", ") self.DeleteSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.ListSurveys = channel.unary_unary( '/ondewo.survey.Surveys/ListSurveys', request_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString,", "NotImplementedError('Method not implemented!') def UpdateAgentSurvey(self, request, context): \"\"\"Update an NLU agent from a", "NLU Agent for it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')", "implemented!') raise NotImplementedError('Method not implemented!') def DeleteSurvey(self, request, context): \"\"\"Delete a survey and", "= channel.unary_unary( '/ondewo.survey.Surveys/DeleteSurvey', 
request_serializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.ListSurveys = channel.unary_unary( '/ondewo.survey.Surveys/ListSurveys', request_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, )", "self.GetSurvey = channel.unary_unary( '/ondewo.survey.Surveys/GetSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.UpdateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString,", "a Survey and an empty NLU Agent for it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not", "\"\"\"Deletes all data of an NLU agent associated to a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED)", "survey and its associated agent (if existent) \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise", "call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateSurvey', ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options,", "channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def DeleteSurvey(request, target, options=(), channel_credentials=None,", "grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurveyAnswers', 
ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)", "options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def UpdateAgentSurvey(request, target, options=(),", "grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateSurvey', ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)", "metadata) @staticmethod def DeleteSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):", "implemented!') def UpdateAgentSurvey(self, request, context): \"\"\"Update an NLU agent from a survey \"\"\"", "insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CreateAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None,", "ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def DeleteAgentSurvey(request,", "compression, wait_for_ready, timeout, metadata) @staticmethod def DeleteSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None,", "\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def UpdateAgentSurvey(self, request, context):", "timeout, metadata) @staticmethod def 
ListSurveys(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None,", "import survey_pb2 as ondewo_dot_survey_dot_survey__pb2 class SurveysStub(object): \"\"\"///// Services /////// \"\"\" def __init__(self, channel):", "\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def UpdateSurvey(self, request, context):", "part of an EXPERIMENTAL API. class Surveys(object): \"\"\"///// Services /////// \"\"\" @staticmethod def", "@staticmethod def GetSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return", "timeout, metadata) @staticmethod def UpdateAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None,", "insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def DeleteAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None,", "implemented!') raise NotImplementedError('Method not implemented!') def GetSurvey(self, request, context): \"\"\"Retrieve a Survey message", "implemented!') def GetAllSurveyAnswers(self, request, context): \"\"\"Retrieve all answers to survey questions collected in", "), 'GetSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'GetAllSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetAllSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.FromString, 
response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ),", "response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.GetAllSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetAllSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.CreateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateAgentSurvey',", "raise NotImplementedError('Method not implemented!') def GetSurveyAnswers(self, request, context): \"\"\"Retrieve answers to survey questions", "timeout, metadata) @staticmethod def UpdateSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None,", "servicer.ListSurveys, request_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.SerializeToString, ), 'GetSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'GetAllSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetAllSurveyAnswers,", "agent associated to a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not", "channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurvey', ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, 
ondewo_dot_survey_dot_survey__pb2.Survey.FromString,", "metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteSurvey', ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready,", "call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetAllSurveyAnswers(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False,", "timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetAllSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure, call_credentials, compression,", "implemented!') raise NotImplementedError('Method not implemented!') def GetAllSurveyAnswers(self, request, context): \"\"\"Retrieve all answers to", "compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteSurvey', ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure,", "target, '/ondewo.survey.Surveys/UpdateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod", ") self.GetAllSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetAllSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, 
response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.CreateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString,", "def DeleteSurvey(self, request, context): \"\"\"Delete a survey and its associated agent (if existent)", "in interactions with a survey agent in any session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not", "servicer.CreateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'UpdateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'DeleteAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteAgentSurvey,", "self.UpdateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.DeleteSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,", "compression, wait_for_ready, timeout, metadata) @staticmethod def GetSurveyAnswers(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None,", "A grpc.Channel. 
\"\"\" self.CreateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.GetSurvey = channel.unary_unary(", "not implemented!') def GetSurveyAnswers(self, request, context): \"\"\"Retrieve answers to survey questions collected in", "\"\"\"Retrieve all answers to survey questions collected in interactions with a survey agent", "metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready,", "empty_pb2 as google_dot_protobuf_dot_empty__pb2 from ondewo.survey import survey_pb2 as ondewo_dot_survey_dot_survey__pb2 class SurveysStub(object): \"\"\"///// Services", "context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetSurveyAnswers(self, request, context): \"\"\"Retrieve", "return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/ListSurveys', ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout,", "NotImplementedError('Method not implemented!') def ListSurveys(self, request, context): \"\"\"Returns the list of all surveys", "wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/ListSurveys', ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, options, 
channel_credentials, insecure, call_credentials,", "response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.UpdateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.DeleteAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteAgentSurvey',", "metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready,", "ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetSurvey(request, target,", "# This class is part of an EXPERIMENTAL API. 
class Surveys(object): \"\"\"///// Services", "ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def UpdateAgentSurvey(request, target,", "options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def UpdateSurvey(request, target, options=(),", "), 'GetAllSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetAllSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'CreateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ),", "call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def ListSurveys(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False,", "an NLU agent from a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method", "ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def DeleteSurvey(request,", "def GetAllSurveyAnswers(self, request, context): \"\"\"Retrieve all answers to survey questions collected in interactions", "class SurveysStub(object): \"\"\"///// Services /////// \"\"\" def __init__(self, channel): \"\"\"Constructor. 
Args: channel: A", "servicer.GetSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'UpdateSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'DeleteSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteSurvey,", "return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurvey', ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout,", "call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options,", "raise NotImplementedError('Method not implemented!') def add_SurveysServicer_to_server(servicer, server): rpc_method_handlers = { 'CreateSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateSurvey,", "response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'UpdateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'DeleteAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, 
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,", "compression, wait_for_ready, timeout, metadata) @staticmethod def DeleteAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None,", "options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteSurvey', ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString,", "grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetAllSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)", "request, context): \"\"\"Retrieve all answers to survey questions collected in interactions with a", "not implemented!') def UpdateSurvey(self, request, context): \"\"\"Update an existing Survey message from the", "compression, wait_for_ready, timeout, metadata) @staticmethod def ListSurveys(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None,", "insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurvey', ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials,", "@staticmethod def UpdateAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return", "metadata=None): return grpc.experimental.unary_unary(request, target, 
'/ondewo.survey.Surveys/GetAllSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready,", "context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeleteAgentSurvey(self, request, context): \"\"\"Deletes all", "wait_for_ready, timeout, metadata) @staticmethod def ListSurveys(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None,", "'GetSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'GetAllSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetAllSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'CreateAgentSurvey':", "def DeleteSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request,", "context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_SurveysServicer_to_server(servicer, server): rpc_method_handlers = {", "self.DeleteSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.ListSurveys = channel.unary_unary( '/ondewo.survey.Surveys/ListSurveys', 
request_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString,", "not implemented!') raise NotImplementedError('Method not implemented!') def DeleteAgentSurvey(self, request, context): \"\"\"Deletes all data", "'CreateSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'GetSurvey': grpc.unary_unary_rpc_method_handler( servicer.GetSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'UpdateSurvey':", "compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure,", "CreateAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target,", "options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString,", "google_dot_protobuf_dot_empty__pb2 from ondewo.survey import survey_pb2 as ondewo_dot_survey_dot_survey__pb2 class SurveysStub(object): \"\"\"///// Services /////// \"\"\"", "servicer.DeleteAgentSurvey, 
request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'ondewo.survey.Surveys', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This", "\"\"\"Update an NLU agent from a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise", "call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/ListSurveys', ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, options,", "def GetSurveyAnswers(self, request, context): \"\"\"Retrieve answers to survey questions collected in interactions with", "not implemented!') raise NotImplementedError('Method not implemented!') def add_SurveysServicer_to_server(servicer, server): rpc_method_handlers = { 'CreateSurvey':", "request_deserializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'DeleteSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'ListSurveys': grpc.unary_unary_rpc_method_handler( servicer.ListSurveys, request_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.FromString,", "grpc.unary_unary_rpc_method_handler( servicer.DeleteSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'ListSurveys': grpc.unary_unary_rpc_method_handler( 
servicer.ListSurveys, request_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.SerializeToString, ), 'GetSurveyAnswers': grpc.unary_unary_rpc_method_handler(", "response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'GetAllSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetAllSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'CreateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString,", "agent (if existent) \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def", "grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurvey', ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)", "'/ondewo.survey.Surveys/GetSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.UpdateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.DeleteSurvey =", "session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise 
NotImplementedError('Method not implemented!') def CreateAgentSurvey(self, request,", "compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetAllSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure,", "context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_SurveysServicer_to_server(servicer, server): rpc_method_handlers =", "grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateSurvey', ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)", "metadata) @staticmethod def GetSurveyAnswers(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):", "ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def UpdateSurvey(request, target,", "wait_for_ready, timeout, metadata) @staticmethod def GetSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None,", "self.CreateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.UpdateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateAgentSurvey', 
request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString,", "response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'ListSurveys': grpc.unary_unary_rpc_method_handler( servicer.ListSurveys, request_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.SerializeToString, ), 'GetSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString,", "compression, wait_for_ready, timeout, metadata) @staticmethod def UpdateSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None,", "agent from a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')", "timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure, call_credentials, compression,", "request, context): \"\"\"Retrieve answers to survey questions collected in interactions with a survey", "call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def UpdateSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False,", "grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, 
ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)", "compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure,", "insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetSurvey(request, target, options=(), channel_credentials=None, call_credentials=None,", "timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateSurvey', ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression,", "\"\"\" def __init__(self, channel): \"\"\"Constructor. Args: channel: A grpc.Channel. 
\"\"\" self.CreateSurvey = channel.unary_unary(", "compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure,", "raise NotImplementedError('Method not implemented!') def DeleteAgentSurvey(self, request, context): \"\"\"Deletes all data of an", "return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout,", "not implemented!') raise NotImplementedError('Method not implemented!') def GetSurvey(self, request, context): \"\"\"Retrieve a Survey", "not implemented!') def ListSurveys(self, request, context): \"\"\"Returns the list of all surveys in", "request_serializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.CreateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.UpdateAgentSurvey = channel.unary_unary(", "grpc.unary_unary_rpc_method_handler( servicer.ListSurveys, request_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.SerializeToString, ), 'GetSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetSurveyAnswers, 
request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'GetAllSurveyAnswers': grpc.unary_unary_rpc_method_handler(", "def GetSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request,", "google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from ondewo.survey import survey_pb2 as ondewo_dot_survey_dot_survey__pb2 class SurveysStub(object):", "timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/ListSurveys', ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, options, channel_credentials, insecure, call_credentials, compression,", "\"\"\" self.CreateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.GetSurvey = channel.unary_unary( '/ondewo.survey.Surveys/GetSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString,", "'/ondewo.survey.Surveys/DeleteAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) class SurveysServicer(object): \"\"\"///// Services /////// \"\"\" def CreateSurvey(self, request,", "= channel.unary_unary( '/ondewo.survey.Surveys/GetAllSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) 
self.CreateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, )", "options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateSurvey', ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString,", "raise NotImplementedError('Method not implemented!') def UpdateAgentSurvey(self, request, context): \"\"\"Update an NLU agent from", "survey questions collected in interactions with a survey agent in any session \"\"\"", "= channel.unary_unary( '/ondewo.survey.Surveys/CreateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.GetSurvey = channel.unary_unary( '/ondewo.survey.Surveys/GetSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, )", "insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials,", "request_serializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.GetSurvey = channel.unary_unary( '/ondewo.survey.Surveys/GetSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, 
response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.UpdateSurvey = channel.unary_unary(", "response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.CreateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.UpdateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateAgentSurvey',", "call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options,", "NLU agent associated to a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method", "self.GetAllSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetAllSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.CreateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString,", "request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.UpdateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateSurvey', 
request_serializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.DeleteSurvey = channel.unary_unary(", "grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteSurvey', ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)", "), 'GetSurvey': grpc.unary_unary_rpc_method_handler( servicer.GetSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'UpdateSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ),", "call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False,", "target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateSurvey',", "insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def ListSurveys(request, target, options=(), channel_credentials=None, call_credentials=None,", "class SurveysServicer(object): \"\"\"///// Services /////// \"\"\" def CreateSurvey(self, request, context): \"\"\"Create a Survey", "def UpdateSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return 
grpc.experimental.unary_unary(request,", "wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateSurvey', ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials,", "'/ondewo.survey.Surveys/ListSurveys', ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def", "compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure,", "import grpc from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from ondewo.survey import survey_pb2 as", "not implemented!') def add_SurveysServicer_to_server(servicer, server): rpc_method_handlers = { 'CreateSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString,", "for it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetSurvey(self,", "metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateSurvey', ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, 
wait_for_ready,", "target, '/ondewo.survey.Surveys/ListSurveys', ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod", "wait_for_ready, timeout, metadata) @staticmethod def GetSurveyAnswers(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None,", "context): \"\"\"Deletes all data of an NLU agent associated to a survey \"\"\"", "raise NotImplementedError('Method not implemented!') def DeleteSurvey(self, request, context): \"\"\"Delete a survey and its", "not implemented!') def GetAllSurveyAnswers(self, request, context): \"\"\"Retrieve all answers to survey questions collected", "def UpdateAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request,", ") self.GetSurvey = channel.unary_unary( '/ondewo.survey.Surveys/GetSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.UpdateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString,", "target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurvey',", "for a specific session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')", "Surveys(object): \"\"\"///// Services /////// 
\"\"\" @staticmethod def CreateSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False,", "channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetAllSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString,", "def GetSurvey(self, request, context): \"\"\"Retrieve a Survey message from the Database and return", "/////// \"\"\" @staticmethod def CreateSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None,", "timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure, call_credentials, compression,", "a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeleteAgentSurvey(self,", "GetSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target,", "an NLU agent associated to a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise", "EDIT! 
\"\"\"Client and server classes corresponding to protobuf-defined services.\"\"\" import grpc from google.protobuf", "ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetAllSurveyAnswers(request, target,", "target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurveyAnswers',", "grpc.unary_unary_rpc_method_handler( servicer.UpdateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'DeleteSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'ListSurveys': grpc.unary_unary_rpc_method_handler(", "request_deserializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'GetSurvey': grpc.unary_unary_rpc_method_handler( servicer.GetSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'UpdateSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.FromString,", "is part of an EXPERIMENTAL API. 
class Surveys(object): \"\"\"///// Services /////// \"\"\" @staticmethod", "options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/ListSurveys', ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString,", "GetAllSurveyAnswers(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target,", "servicer.DeleteSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'ListSurveys': grpc.unary_unary_rpc_method_handler( servicer.ListSurveys, request_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.SerializeToString, ), 'GetSurveyAnswers': grpc.unary_unary_rpc_method_handler( servicer.GetSurveyAnswers,", "NotImplementedError('Method not implemented!') def add_SurveysServicer_to_server(servicer, server): rpc_method_handlers = { 'CreateSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.FromString,", "'/ondewo.survey.Surveys/CreateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.UpdateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.DeleteAgentSurvey =", 
"grpc.unary_unary_rpc_method_handler( servicer.GetSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'UpdateSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'DeleteSurvey': grpc.unary_unary_rpc_method_handler(", "from a Survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def", "not implemented!') def GetSurvey(self, request, context): \"\"\"Retrieve a Survey message from the Database", "NotImplementedError('Method not implemented!') def DeleteSurvey(self, request, context): \"\"\"Delete a survey and its associated", "it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def UpdateSurvey(self, request,", "compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateSurvey', ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure,", "implemented!') raise NotImplementedError('Method not implemented!') def ListSurveys(self, request, context): \"\"\"Returns the list of", "survey agent in any session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not", "metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, 
ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready,", "context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeleteSurvey(self, request, context): \"\"\"Delete", "not implemented!') def DeleteAgentSurvey(self, request, context): \"\"\"Deletes all data of an NLU agent", "rpc_method_handlers = { 'CreateSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'GetSurvey': grpc.unary_unary_rpc_method_handler( servicer.GetSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.FromString,", "channel.unary_unary( '/ondewo.survey.Surveys/CreateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.UpdateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.DeleteAgentSurvey", "\"\"\"Constructor. Args: channel: A grpc.Channel. 
\"\"\" self.CreateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, )", "@staticmethod def DeleteSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return", "@staticmethod def UpdateSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return", "SurveysStub(object): \"\"\"///// Services /////// \"\"\" def __init__(self, channel): \"\"\"Constructor. Args: channel: A grpc.Channel.", "channel.unary_unary( '/ondewo.survey.Surveys/DeleteAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) class SurveysServicer(object): \"\"\"///// Services /////// \"\"\" def CreateSurvey(self,", "UpdateSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target,", "ListSurveys(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target,", "channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString,", "/////// \"\"\" def 
CreateSurvey(self, request, context): \"\"\"Create a Survey and an empty NLU", "Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! \"\"\"Client and", "call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteSurvey', ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options,", "ondewo.survey import survey_pb2 as ondewo_dot_survey_dot_survey__pb2 class SurveysStub(object): \"\"\"///// Services /////// \"\"\" def __init__(self,", "insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetAllSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials,", "call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CreateAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False,", "'/ondewo.survey.Surveys/CreateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def", "timeout, metadata) @staticmethod def GetAllSurveyAnswers(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None,", "classes corresponding to protobuf-defined services.\"\"\" import grpc from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2", "wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, 
'/ondewo.survey.Surveys/DeleteAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure, call_credentials,", "'/ondewo.survey.Surveys/CreateSurvey', ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def", "channel.unary_unary( '/ondewo.survey.Surveys/CreateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.GetSurvey = channel.unary_unary( '/ondewo.survey.Surveys/GetSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.UpdateSurvey", "'UpdateSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'DeleteSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'ListSurveys':", "services.\"\"\" import grpc from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from ondewo.survey import survey_pb2", "grpc.unary_unary_rpc_method_handler( servicer.DeleteAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'ondewo.survey.Surveys', 
rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) #", "@staticmethod def CreateAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return", "target, '/ondewo.survey.Surveys/UpdateSurvey', ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod", "and configures an NLU Agent from a Survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!')", "raise NotImplementedError('Method not implemented!') def UpdateSurvey(self, request, context): \"\"\"Update an existing Survey message", "response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'DeleteSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'ListSurveys': grpc.unary_unary_rpc_method_handler( servicer.ListSurveys, request_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.SerializeToString,", "\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_SurveysServicer_to_server(servicer, server): rpc_method_handlers", "GetSurveyAnswers(self, request, context): \"\"\"Retrieve answers to survey questions collected in interactions with a", "call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, 
'/ondewo.survey.Surveys/CreateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options,", "context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetSurvey(self, request, context): \"\"\"Retrieve a", "channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetSurveyAnswers(request, target, options=(), channel_credentials=None,", "response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'UpdateSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'DeleteSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,", "channel.unary_unary( '/ondewo.survey.Surveys/DeleteSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.ListSurveys = channel.unary_unary( '/ondewo.survey.Surveys/ListSurveys', request_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, ) self.GetSurveyAnswers", "request, context): \"\"\"Retrieve a Survey message from the Database and return it \"\"\"", "@staticmethod def DeleteAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return", "'/ondewo.survey.Surveys/DeleteSurvey', 
request_serializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.ListSurveys = channel.unary_unary( '/ondewo.survey.Surveys/ListSurveys', request_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, ) self.GetSurveyAnswers =", "an existing Survey message from the Database and return it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method", "a specific session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def", "context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def CreateAgentSurvey(self, request, context): \"\"\"Populate", "response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'CreateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'UpdateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString,", "context): \"\"\"Create a Survey and an empty NLU Agent for it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED)", "grpc.unary_unary_rpc_method_handler( servicer.CreateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'GetSurvey': 
grpc.unary_unary_rpc_method_handler( servicer.GetSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'UpdateSurvey': grpc.unary_unary_rpc_method_handler(", "\"\"\"///// Services /////// \"\"\" def __init__(self, channel): \"\"\"Constructor. Args: channel: A grpc.Channel. \"\"\"", "metadata) @staticmethod def DeleteAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):", "target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateAgentSurvey',", "def add_SurveysServicer_to_server(servicer, server): rpc_method_handlers = { 'CreateSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'GetSurvey':", "in any session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def", "implemented!') def DeleteSurvey(self, request, context): \"\"\"Delete a survey and its associated agent (if", "return it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def UpdateSurvey(self,", "Args: channel: A grpc.Channel. 
\"\"\" self.CreateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.GetSurvey", "add_SurveysServicer_to_server(servicer, server): rpc_method_handlers = { 'CreateSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'GetSurvey': grpc.unary_unary_rpc_method_handler(", "grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)", "implemented!') def GetSurvey(self, request, context): \"\"\"Retrieve a Survey message from the Database and", "Python protocol compiler plugin. DO NOT EDIT! \"\"\"Client and server classes corresponding to", "API. 
class Surveys(object): \"\"\"///// Services /////// \"\"\" @staticmethod def CreateSurvey(request, target, options=(), channel_credentials=None,", "UpdateSurvey(self, request, context): \"\"\"Update an existing Survey message from the Database and return", "wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateSurvey', ondewo_dot_survey_dot_survey__pb2.CreateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials,", "UpdateAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target,", "questions collected in interactions with a survey agent in any session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED)", "the gRPC Python protocol compiler plugin. DO NOT EDIT! \"\"\"Client and server classes", "collected in interactions with a survey agent for a specific session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED)", "'/ondewo.survey.Surveys/DeleteSurvey', ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def", "existing Survey message from the Database and return it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not", "ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetSurveyAnswers(request, target,", "compiler plugin. DO NOT EDIT! 
\"\"\"Client and server classes corresponding to protobuf-defined services.\"\"\"", "channel.unary_unary( '/ondewo.survey.Surveys/GetAllSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.CreateAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/CreateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.UpdateAgentSurvey", "channel.unary_unary( '/ondewo.survey.Surveys/UpdateAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, ) self.DeleteAgentSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) class", "context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeleteAgentSurvey(self, request, context): \"\"\"Deletes", "interactions with a survey agent in any session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!')", "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
\"\"\"Client", "channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CreateAgentSurvey(request, target, options=(), channel_credentials=None,", "options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurvey', ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString,", "wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteSurvey', ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure, call_credentials,", "'DeleteSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'ListSurveys': grpc.unary_unary_rpc_method_handler( servicer.ListSurveys, request_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.SerializeToString, ), 'GetSurveyAnswers':", "options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetAllSurveyAnswers', ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString,", "implemented!') def GetSurveyAnswers(self, request, context): \"\"\"Retrieve answers to survey questions collected in interactions", "CreateSurvey(self, request, context): \"\"\"Create a Survey and an empty NLU Agent for it", "metadata) @staticmethod def GetAllSurveyAnswers(request, target, 
options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):", "target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteAgentSurvey',", "session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetAllSurveyAnswers(self, request,", "= channel.unary_unary( '/ondewo.survey.Surveys/DeleteAgentSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) class SurveysServicer(object): \"\"\"///// Services /////// \"\"\" def", "from the Database and return it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method", "wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure, call_credentials,", "return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout,", "metadata) @staticmethod def UpdateSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):", "in the server \"\"\" 
context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def", "timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/GetSurvey', ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression,", "channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString,", "response_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, ) self.GetSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.GetAllSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetAllSurveyAnswers',", "def ListSurveys(self, request, context): \"\"\"Returns the list of all surveys in the server", "server \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetSurveyAnswers(self, request,", "context): \"\"\"Update an NLU agent from a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!')", "and server classes corresponding to protobuf-defined services.\"\"\" import grpc from google.protobuf import empty_pb2", ") self.GetSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetSurveyAnswers', 
request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.GetAllSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetAllSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString,", "response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.GetSurvey = channel.unary_unary( '/ondewo.survey.Surveys/GetSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.UpdateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateSurvey',", "NOT EDIT! \"\"\"Client and server classes corresponding to protobuf-defined services.\"\"\" import grpc from", "NotImplementedError('Method not implemented!') def CreateAgentSurvey(self, request, context): \"\"\"Populate and configures an NLU Agent", "return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateSurvey', ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout,", "response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.ListSurveys = channel.unary_unary( '/ondewo.survey.Surveys/ListSurveys', request_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, ) self.GetSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetSurveyAnswers',", "options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def DeleteSurvey(request, target, options=(),", 
"response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.UpdateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.DeleteSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteSurvey',", "return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/DeleteSurvey', ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout,", "request_serializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, ) self.GetSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.GetAllSurveyAnswers = channel.unary_unary(", "def DeleteAgentSurvey(self, request, context): \"\"\"Deletes all data of an NLU agent associated to", "channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/UpdateSurvey', ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.Survey.FromString,", "context): \"\"\"Populate and configures an NLU Agent from a Survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method", "\"\"\"Delete a survey and its associated agent (if existent) \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not", 
"Survey and an empty NLU Agent for it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!')", "insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetAllSurveyAnswers(request, target, options=(), channel_credentials=None, call_credentials=None,", "/////// \"\"\" def __init__(self, channel): \"\"\"Constructor. Args: channel: A grpc.Channel. \"\"\" self.CreateSurvey =", "channel.unary_unary( '/ondewo.survey.Surveys/GetSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.GetAllSurveyAnswers = channel.unary_unary( '/ondewo.survey.Surveys/GetAllSurveyAnswers', request_serializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, ) self.CreateAgentSurvey", "request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'CreateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'UpdateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString,", "context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def CreateAgentSurvey(self, request, context): \"\"\"Populate and", "survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def 
DeleteAgentSurvey(self, request,", "raise NotImplementedError('Method not implemented!') def GetSurvey(self, request, context): \"\"\"Retrieve a Survey message from", "context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def UpdateAgentSurvey(self, request, context): \"\"\"Update an", "insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/ListSurveys', ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, options, channel_credentials,", "not implemented!') def CreateAgentSurvey(self, request, context): \"\"\"Populate and configures an NLU Agent from", "servicer.GetAllSurveyAnswers, request_deserializer=ondewo_dot_survey_dot_survey__pb2.GetAllSurveyAnswersRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.SerializeToString, ), 'CreateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'UpdateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateAgentSurvey,", "not implemented!') raise NotImplementedError('Method not implemented!') def UpdateSurvey(self, request, context): \"\"\"Update an existing", "target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/ListSurveys',", "specific session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetAllSurveyAnswers(self,", "insecure, 
call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def UpdateAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None,", "call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def UpdateAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False,", "the server \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetSurveyAnswers(self,", "wait_for_ready, timeout, metadata) @staticmethod def DeleteAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None,", ") self.UpdateSurvey = channel.unary_unary( '/ondewo.survey.Surveys/UpdateSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.SerializeToString, response_deserializer=ondewo_dot_survey_dot_survey__pb2.Survey.FromString, ) self.DeleteSurvey = channel.unary_unary( '/ondewo.survey.Surveys/DeleteSurvey', request_serializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString,", "), 'UpdateSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.UpdateSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.Survey.SerializeToString, ), 'DeleteSurvey': grpc.unary_unary_rpc_method_handler( servicer.DeleteSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ),", "@staticmethod def ListSurveys(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return", "compression, wait_for_ready, timeout, metadata) @staticmethod def 
GetAllSurveyAnswers(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None,", "wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure, call_credentials,", "), 'CreateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.CreateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ), 'UpdateAgentSurvey': grpc.unary_unary_rpc_method_handler( servicer.UpdateAgentSurvey, request_deserializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.FromString, response_serializer=ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.SerializeToString, ),", "metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/ListSurveys', ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready,", "agent in any session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')", "target, '/ondewo.survey.Surveys/CreateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod", "survey_pb2 as ondewo_dot_survey_dot_survey__pb2 class SurveysStub(object): \"\"\"///// Services /////// \"\"\" def __init__(self, channel): 
\"\"\"Constructor.", "channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/CreateAgentSurvey', ondewo_dot_survey_dot_survey__pb2.AgentSurveyRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.AgentSurveyResponse.FromString,", "Services /////// \"\"\" def CreateSurvey(self, request, context): \"\"\"Create a Survey and an empty", "call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def DeleteAgentSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False,", "context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetSurveyAnswers(self, request, context): \"\"\"Retrieve answers", "target, '/ondewo.survey.Surveys/DeleteSurvey', ondewo_dot_survey_dot_survey__pb2.DeleteSurveyRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod", "compression, wait_for_ready, timeout, metadata) @staticmethod def GetSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None,", "timeout, metadata) @staticmethod def DeleteSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None,", "wait_for_ready, timeout, metadata) @staticmethod def DeleteSurvey(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None,", "grpc from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from ondewo.survey import survey_pb2 as ondewo_dot_survey_dot_survey__pb2", "\"\"\"Returns the list of all surveys in the server \"\"\" 
context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not", "and return it \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def", "a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_SurveysServicer_to_server(servicer,", "channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/ondewo.survey.Surveys/ListSurveys', ondewo_dot_survey_dot_survey__pb2.ListSurveysRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.ListSurveysResponse.FromString,", "with a survey agent in any session \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise", "GetAllSurveyAnswers(self, request, context): \"\"\"Retrieve all answers to survey questions collected in interactions with", "ondewo_dot_survey_dot_survey__pb2.GetSurveyAnswersRequest.SerializeToString, ondewo_dot_survey_dot_survey__pb2.SurveyAnswersResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetAllSurveyAnswers(request,", "existent) \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ListSurveys(self, request,", "DO NOT EDIT! 
\"\"\"Client and server classes corresponding to protobuf-defined services.\"\"\" import grpc", "associated to a survey \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')", "metadata) @staticmethod def ListSurveys(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):" ]
[ "op): setattr(OpcodeDefaults, op, True) class OpcodeStatus(object): _immutable_fields_ = [\"frozen\", \"use_flag_cache\"] + dual_implementation_opcodes def", "== \"AggStep\": self.AggFinal = value elif whichop == \"AggFinal\": self.AggStep = value def", "s == \"all\": for op in unrolling_dual_implementation_opcodes: setattr(self, op, False) return specs =", "== op: setattr(self, whichop, value) if whichop == \"Compare\": self.Jump = value elif", "op, value): if self.frozen: raise TypeError(\"too late to change\") if self.use_flag_cache: raise TypeError(\"can't", "elif whichop == \"AggStep\": self.AggFinal = value elif whichop == \"AggFinal\": self.AggStep =", "+ dual_implementation_opcodes def __init__(self, use_flag_cache): self.use_flag_cache = use_flag_cache self.frozen = False for op", "] unrolling_dual_implementation_opcodes = unrolling_iterable(dual_implementation_opcodes) class OpcodeDefaults(object): OpenRead_OpenWrite = False Cast = False OpcodeDefaults", "'AggStep', 'Affinity', 'Cast', 'CollSeq', 'Compare', 'Copy', 'EndCoroutine', 'Function', 'Gosub', 'Goto', 'IdxLE_IdxGT_IdxLT_IdxGE', 'IdxRowid', 'IfPos',", "OpcodeDefaults(object): OpenRead_OpenWrite = False Cast = False OpcodeDefaults = OpcodeDefaults() for op in", "raise TypeError(\"can't change if flag cache is used\") for whichop in unrolling_dual_implementation_opcodes: if", "whichop in unrolling_dual_implementation_opcodes: if whichop == op: setattr(self, whichop, value) if whichop ==", "op in unrolling_dual_implementation_opcodes: setattr(self, op, getattr(OpcodeDefaults, op)) def set_use_translated(self, op, value): if self.frozen:", "'Ne_Eq_Gt_Le_Lt_Ge', 'Next', 'NextIfOpen', 'NotExists', 'NotNull', 'Null', 'Once', 'OpenRead_OpenWrite', 'Real', 'RealAffinity', 'ResultRow', 'Return', 'SCopy',", "'IdxRowid', 'IfPos', 'IfZero', 'If_IfNot', 'InitCoroutine', 'Integer', 'IsNull', 'Jump', 'MakeRecord', 'Move', 'MustBeInt', 'Ne_Eq_Gt_Le_Lt_Ge', 'Next',", "= [\"frozen\", \"use_flag_cache\"] + 
dual_implementation_opcodes def __init__(self, use_flag_cache): self.use_flag_cache = use_flag_cache self.frozen =", "self.frozen: raise TypeError(\"too late to change\") if self.use_flag_cache: raise TypeError(\"can't change if flag", "= use_flag_cache self.frozen = False for op in unrolling_dual_implementation_opcodes: setattr(self, op, getattr(OpcodeDefaults, op))", "set_use_translated(self, op, value): if self.frozen: raise TypeError(\"too late to change\") if self.use_flag_cache: raise", "cache is used\") for whichop in unrolling_dual_implementation_opcodes: if whichop == op: setattr(self, whichop,", "elif whichop == \"AggFinal\": self.AggStep = value def freeze(self): if not self.frozen: self.frozen", "whichop == \"AggFinal\": self.AggStep = value def freeze(self): if not self.frozen: self.frozen =", "in dual_implementation_opcodes: if not hasattr(OpcodeDefaults, op): setattr(OpcodeDefaults, op, True) class OpcodeStatus(object): _immutable_fields_ =", "rpython.rlib.unroll import unrolling_iterable dual_implementation_opcodes = [ 'Add_Subtract_Multiply_Divide_Remainder', 'AggFinal', 'AggStep', 'Affinity', 'Cast', 'CollSeq', 'Compare',", "for op in unrolling_dual_implementation_opcodes: setattr(self, op, False) return specs = s.split(\":\") for spec", "[\"frozen\", \"use_flag_cache\"] + dual_implementation_opcodes def __init__(self, use_flag_cache): self.use_flag_cache = use_flag_cache self.frozen = False", "setattr(self, whichop, value) if whichop == \"Compare\": self.Jump = value elif whichop ==", "'AggFinal', 'AggStep', 'Affinity', 'Cast', 'CollSeq', 'Compare', 'Copy', 'EndCoroutine', 'Function', 'Gosub', 'Goto', 'IdxLE_IdxGT_IdxLT_IdxGE', 'IdxRowid',", "for op in unrolling_dual_implementation_opcodes: setattr(self, op, getattr(OpcodeDefaults, op)) def set_use_translated(self, op, value): if", "value elif whichop == \"Jump\": self.Compare = value elif whichop == \"AggStep\": self.AggFinal", "elif whichop == \"Jump\": self.Compare = value elif whichop == 
\"AggStep\": self.AggFinal =", "'Copy', 'EndCoroutine', 'Function', 'Gosub', 'Goto', 'IdxLE_IdxGT_IdxLT_IdxGE', 'IdxRowid', 'IfPos', 'IfZero', 'If_IfNot', 'InitCoroutine', 'Integer', 'IsNull',", "whichop == op: setattr(self, whichop, value) if whichop == \"Compare\": self.Jump = value", "if flag cache is used\") for whichop in unrolling_dual_implementation_opcodes: if whichop == op:", "'Function', 'Gosub', 'Goto', 'IdxLE_IdxGT_IdxLT_IdxGE', 'IdxRowid', 'IfPos', 'IfZero', 'If_IfNot', 'InitCoroutine', 'Integer', 'IsNull', 'Jump', 'MakeRecord',", "'Affinity', 'Cast', 'CollSeq', 'Compare', 'Copy', 'EndCoroutine', 'Function', 'Gosub', 'Goto', 'IdxLE_IdxGT_IdxLT_IdxGE', 'IdxRowid', 'IfPos', 'IfZero',", "op, True) class OpcodeStatus(object): _immutable_fields_ = [\"frozen\", \"use_flag_cache\"] + dual_implementation_opcodes def __init__(self, use_flag_cache):", "self.use_flag_cache: raise TypeError(\"can't change if flag cache is used\") for whichop in unrolling_dual_implementation_opcodes:", "'Compare', 'Copy', 'EndCoroutine', 'Function', 'Gosub', 'Goto', 'IdxLE_IdxGT_IdxLT_IdxGE', 'IdxRowid', 'IfPos', 'IfZero', 'If_IfNot', 'InitCoroutine', 'Integer',", "if self.use_flag_cache: raise TypeError(\"can't change if flag cache is used\") for whichop in", "'Null', 'Once', 'OpenRead_OpenWrite', 'Real', 'RealAffinity', 'ResultRow', 'Return', 'SCopy', 'Seek', 'SeekLT_SeekLE_SeekGE_SeekGT', 'Sequence', 'Variable', 'Yield',", "change if flag cache is used\") for whichop in unrolling_dual_implementation_opcodes: if whichop ==", "not hasattr(OpcodeDefaults, op): setattr(OpcodeDefaults, op, True) class OpcodeStatus(object): _immutable_fields_ = [\"frozen\", \"use_flag_cache\"] +", "OpcodeDefaults() for op in dual_implementation_opcodes: if not hasattr(OpcodeDefaults, op): setattr(OpcodeDefaults, op, True) class", "used\") for whichop in unrolling_dual_implementation_opcodes: if whichop == op: setattr(self, whichop, value) if", "op)) def set_use_translated(self, op, value): if 
self.frozen: raise TypeError(\"too late to change\") if", "== \"all\": for op in unrolling_dual_implementation_opcodes: setattr(self, op, False) return specs = s.split(\":\")", "not self.frozen: self.frozen = True def disable_from_cmdline(self, s): if s == \"all\": for", "= value elif whichop == \"Jump\": self.Compare = value elif whichop == \"AggStep\":", "'Real', 'RealAffinity', 'ResultRow', 'Return', 'SCopy', 'Seek', 'SeekLT_SeekLE_SeekGE_SeekGT', 'Sequence', 'Variable', 'Yield', ] unrolling_dual_implementation_opcodes =", "'Seek', 'SeekLT_SeekLE_SeekGE_SeekGT', 'Sequence', 'Variable', 'Yield', ] unrolling_dual_implementation_opcodes = unrolling_iterable(dual_implementation_opcodes) class OpcodeDefaults(object): OpenRead_OpenWrite =", "in unrolling_dual_implementation_opcodes: setattr(self, op, False) return specs = s.split(\":\") for spec in specs:", "\"Compare\": self.Jump = value elif whichop == \"Jump\": self.Compare = value elif whichop", "late to change\") if self.use_flag_cache: raise TypeError(\"can't change if flag cache is used\")", "_immutable_fields_ = [\"frozen\", \"use_flag_cache\"] + dual_implementation_opcodes def __init__(self, use_flag_cache): self.use_flag_cache = use_flag_cache self.frozen", "== \"Jump\": self.Compare = value elif whichop == \"AggStep\": self.AggFinal = value elif", "'Sequence', 'Variable', 'Yield', ] unrolling_dual_implementation_opcodes = unrolling_iterable(dual_implementation_opcodes) class OpcodeDefaults(object): OpenRead_OpenWrite = False Cast", "'IfPos', 'IfZero', 'If_IfNot', 'InitCoroutine', 'Integer', 'IsNull', 'Jump', 'MakeRecord', 'Move', 'MustBeInt', 'Ne_Eq_Gt_Le_Lt_Ge', 'Next', 'NextIfOpen',", "hasattr(OpcodeDefaults, op): setattr(OpcodeDefaults, op, True) class OpcodeStatus(object): _immutable_fields_ = [\"frozen\", \"use_flag_cache\"] + dual_implementation_opcodes", "whichop == \"Jump\": self.Compare = value elif whichop == \"AggStep\": self.AggFinal = value", "dual_implementation_opcodes: if not 
hasattr(OpcodeDefaults, op): setattr(OpcodeDefaults, op, True) class OpcodeStatus(object): _immutable_fields_ = [\"frozen\",", "False for op in unrolling_dual_implementation_opcodes: setattr(self, op, getattr(OpcodeDefaults, op)) def set_use_translated(self, op, value):", "for whichop in unrolling_dual_implementation_opcodes: if whichop == op: setattr(self, whichop, value) if whichop", "whichop == \"Compare\": self.Jump = value elif whichop == \"Jump\": self.Compare = value", "OpcodeDefaults = OpcodeDefaults() for op in dual_implementation_opcodes: if not hasattr(OpcodeDefaults, op): setattr(OpcodeDefaults, op,", "'Once', 'OpenRead_OpenWrite', 'Real', 'RealAffinity', 'ResultRow', 'Return', 'SCopy', 'Seek', 'SeekLT_SeekLE_SeekGE_SeekGT', 'Sequence', 'Variable', 'Yield', ]", "in unrolling_dual_implementation_opcodes: if whichop == op: setattr(self, whichop, value) if whichop == \"Compare\":", "'If_IfNot', 'InitCoroutine', 'Integer', 'IsNull', 'Jump', 'MakeRecord', 'Move', 'MustBeInt', 'Ne_Eq_Gt_Le_Lt_Ge', 'Next', 'NextIfOpen', 'NotExists', 'NotNull',", "use_flag_cache self.frozen = False for op in unrolling_dual_implementation_opcodes: setattr(self, op, getattr(OpcodeDefaults, op)) def", "__init__(self, use_flag_cache): self.use_flag_cache = use_flag_cache self.frozen = False for op in unrolling_dual_implementation_opcodes: setattr(self,", "is used\") for whichop in unrolling_dual_implementation_opcodes: if whichop == op: setattr(self, whichop, value)", "op in unrolling_dual_implementation_opcodes: setattr(self, op, False) return specs = s.split(\":\") for spec in", "'NotNull', 'Null', 'Once', 'OpenRead_OpenWrite', 'Real', 'RealAffinity', 'ResultRow', 'Return', 'SCopy', 'Seek', 'SeekLT_SeekLE_SeekGE_SeekGT', 'Sequence', 'Variable',", "self.Jump = value elif whichop == \"Jump\": self.Compare = value elif whichop ==", "\"AggFinal\": self.AggStep = value def freeze(self): if not self.frozen: self.frozen = True def", "op in dual_implementation_opcodes: if not 
hasattr(OpcodeDefaults, op): setattr(OpcodeDefaults, op, True) class OpcodeStatus(object): _immutable_fields_", "'Gosub', 'Goto', 'IdxLE_IdxGT_IdxLT_IdxGE', 'IdxRowid', 'IfPos', 'IfZero', 'If_IfNot', 'InitCoroutine', 'Integer', 'IsNull', 'Jump', 'MakeRecord', 'Move',", "\"all\": for op in unrolling_dual_implementation_opcodes: setattr(self, op, False) return specs = s.split(\":\") for", "unrolling_iterable dual_implementation_opcodes = [ 'Add_Subtract_Multiply_Divide_Remainder', 'AggFinal', 'AggStep', 'Affinity', 'Cast', 'CollSeq', 'Compare', 'Copy', 'EndCoroutine',", "\"use_flag_cache\"] + dual_implementation_opcodes def __init__(self, use_flag_cache): self.use_flag_cache = use_flag_cache self.frozen = False for", "Cast = False OpcodeDefaults = OpcodeDefaults() for op in dual_implementation_opcodes: if not hasattr(OpcodeDefaults,", "[ 'Add_Subtract_Multiply_Divide_Remainder', 'AggFinal', 'AggStep', 'Affinity', 'Cast', 'CollSeq', 'Compare', 'Copy', 'EndCoroutine', 'Function', 'Gosub', 'Goto',", "to change\") if self.use_flag_cache: raise TypeError(\"can't change if flag cache is used\") for", "def freeze(self): if not self.frozen: self.frozen = True def disable_from_cmdline(self, s): if s", "class OpcodeDefaults(object): OpenRead_OpenWrite = False Cast = False OpcodeDefaults = OpcodeDefaults() for op", "'EndCoroutine', 'Function', 'Gosub', 'Goto', 'IdxLE_IdxGT_IdxLT_IdxGE', 'IdxRowid', 'IfPos', 'IfZero', 'If_IfNot', 'InitCoroutine', 'Integer', 'IsNull', 'Jump',", "\"AggStep\": self.AggFinal = value elif whichop == \"AggFinal\": self.AggStep = value def freeze(self):", "'Variable', 'Yield', ] unrolling_dual_implementation_opcodes = unrolling_iterable(dual_implementation_opcodes) class OpcodeDefaults(object): OpenRead_OpenWrite = False Cast =", "s): if s == \"all\": for op in unrolling_dual_implementation_opcodes: setattr(self, op, False) return", "= False Cast = False OpcodeDefaults = OpcodeDefaults() for op in dual_implementation_opcodes: if", "value): if 
self.frozen: raise TypeError(\"too late to change\") if self.use_flag_cache: raise TypeError(\"can't change", "self.frozen: self.frozen = True def disable_from_cmdline(self, s): if s == \"all\": for op", "self.frozen = True def disable_from_cmdline(self, s): if s == \"all\": for op in", "'Integer', 'IsNull', 'Jump', 'MakeRecord', 'Move', 'MustBeInt', 'Ne_Eq_Gt_Le_Lt_Ge', 'Next', 'NextIfOpen', 'NotExists', 'NotNull', 'Null', 'Once',", "self.AggFinal = value elif whichop == \"AggFinal\": self.AggStep = value def freeze(self): if", "import unrolling_iterable dual_implementation_opcodes = [ 'Add_Subtract_Multiply_Divide_Remainder', 'AggFinal', 'AggStep', 'Affinity', 'Cast', 'CollSeq', 'Compare', 'Copy',", "'IsNull', 'Jump', 'MakeRecord', 'Move', 'MustBeInt', 'Ne_Eq_Gt_Le_Lt_Ge', 'Next', 'NextIfOpen', 'NotExists', 'NotNull', 'Null', 'Once', 'OpenRead_OpenWrite',", "whichop, value) if whichop == \"Compare\": self.Jump = value elif whichop == \"Jump\":", "unrolling_dual_implementation_opcodes: if whichop == op: setattr(self, whichop, value) if whichop == \"Compare\": self.Jump", "value elif whichop == \"AggFinal\": self.AggStep = value def freeze(self): if not self.frozen:", "False) return specs = s.split(\":\") for spec in specs: if spec: self.set_use_translated(spec, False)", "'InitCoroutine', 'Integer', 'IsNull', 'Jump', 'MakeRecord', 'Move', 'MustBeInt', 'Ne_Eq_Gt_Le_Lt_Ge', 'Next', 'NextIfOpen', 'NotExists', 'NotNull', 'Null',", "'ResultRow', 'Return', 'SCopy', 'Seek', 'SeekLT_SeekLE_SeekGE_SeekGT', 'Sequence', 'Variable', 'Yield', ] unrolling_dual_implementation_opcodes = unrolling_iterable(dual_implementation_opcodes) class", "dual_implementation_opcodes def __init__(self, use_flag_cache): self.use_flag_cache = use_flag_cache self.frozen = False for op in", "= [ 'Add_Subtract_Multiply_Divide_Remainder', 'AggFinal', 'AggStep', 'Affinity', 'Cast', 'CollSeq', 'Compare', 'Copy', 'EndCoroutine', 'Function', 'Gosub',", "= False for op in 
unrolling_dual_implementation_opcodes: setattr(self, op, getattr(OpcodeDefaults, op)) def set_use_translated(self, op,", "== \"Compare\": self.Jump = value elif whichop == \"Jump\": self.Compare = value elif", "'Cast', 'CollSeq', 'Compare', 'Copy', 'EndCoroutine', 'Function', 'Gosub', 'Goto', 'IdxLE_IdxGT_IdxLT_IdxGE', 'IdxRowid', 'IfPos', 'IfZero', 'If_IfNot',", "setattr(self, op, getattr(OpcodeDefaults, op)) def set_use_translated(self, op, value): if self.frozen: raise TypeError(\"too late", "from rpython.rlib.unroll import unrolling_iterable dual_implementation_opcodes = [ 'Add_Subtract_Multiply_Divide_Remainder', 'AggFinal', 'AggStep', 'Affinity', 'Cast', 'CollSeq',", "setattr(self, op, False) return specs = s.split(\":\") for spec in specs: if spec:", "value def freeze(self): if not self.frozen: self.frozen = True def disable_from_cmdline(self, s): if", "op, getattr(OpcodeDefaults, op)) def set_use_translated(self, op, value): if self.frozen: raise TypeError(\"too late to", "getattr(OpcodeDefaults, op)) def set_use_translated(self, op, value): if self.frozen: raise TypeError(\"too late to change\")", "if not self.frozen: self.frozen = True def disable_from_cmdline(self, s): if s == \"all\":", "'NextIfOpen', 'NotExists', 'NotNull', 'Null', 'Once', 'OpenRead_OpenWrite', 'Real', 'RealAffinity', 'ResultRow', 'Return', 'SCopy', 'Seek', 'SeekLT_SeekLE_SeekGE_SeekGT',", "'Next', 'NextIfOpen', 'NotExists', 'NotNull', 'Null', 'Once', 'OpenRead_OpenWrite', 'Real', 'RealAffinity', 'ResultRow', 'Return', 'SCopy', 'Seek',", "value elif whichop == \"AggStep\": self.AggFinal = value elif whichop == \"AggFinal\": self.AggStep", "= False OpcodeDefaults = OpcodeDefaults() for op in dual_implementation_opcodes: if not hasattr(OpcodeDefaults, op):", "unrolling_iterable(dual_implementation_opcodes) class OpcodeDefaults(object): OpenRead_OpenWrite = False Cast = False OpcodeDefaults = OpcodeDefaults() for", "'NotExists', 'NotNull', 'Null', 'Once', 'OpenRead_OpenWrite', 'Real', 
'RealAffinity', 'ResultRow', 'Return', 'SCopy', 'Seek', 'SeekLT_SeekLE_SeekGE_SeekGT', 'Sequence',", "= True def disable_from_cmdline(self, s): if s == \"all\": for op in unrolling_dual_implementation_opcodes:", "'IfZero', 'If_IfNot', 'InitCoroutine', 'Integer', 'IsNull', 'Jump', 'MakeRecord', 'Move', 'MustBeInt', 'Ne_Eq_Gt_Le_Lt_Ge', 'Next', 'NextIfOpen', 'NotExists',", "'SeekLT_SeekLE_SeekGE_SeekGT', 'Sequence', 'Variable', 'Yield', ] unrolling_dual_implementation_opcodes = unrolling_iterable(dual_implementation_opcodes) class OpcodeDefaults(object): OpenRead_OpenWrite = False", "unrolling_dual_implementation_opcodes = unrolling_iterable(dual_implementation_opcodes) class OpcodeDefaults(object): OpenRead_OpenWrite = False Cast = False OpcodeDefaults =", "'Add_Subtract_Multiply_Divide_Remainder', 'AggFinal', 'AggStep', 'Affinity', 'Cast', 'CollSeq', 'Compare', 'Copy', 'EndCoroutine', 'Function', 'Gosub', 'Goto', 'IdxLE_IdxGT_IdxLT_IdxGE',", "= OpcodeDefaults() for op in dual_implementation_opcodes: if not hasattr(OpcodeDefaults, op): setattr(OpcodeDefaults, op, True)", "True def disable_from_cmdline(self, s): if s == \"all\": for op in unrolling_dual_implementation_opcodes: setattr(self,", "'Jump', 'MakeRecord', 'Move', 'MustBeInt', 'Ne_Eq_Gt_Le_Lt_Ge', 'Next', 'NextIfOpen', 'NotExists', 'NotNull', 'Null', 'Once', 'OpenRead_OpenWrite', 'Real',", "False OpcodeDefaults = OpcodeDefaults() for op in dual_implementation_opcodes: if not hasattr(OpcodeDefaults, op): setattr(OpcodeDefaults,", "OpcodeStatus(object): _immutable_fields_ = [\"frozen\", \"use_flag_cache\"] + dual_implementation_opcodes def __init__(self, use_flag_cache): self.use_flag_cache = use_flag_cache", "dual_implementation_opcodes = [ 'Add_Subtract_Multiply_Divide_Remainder', 'AggFinal', 'AggStep', 'Affinity', 'Cast', 'CollSeq', 'Compare', 'Copy', 'EndCoroutine', 'Function',", "value) if whichop == \"Compare\": self.Jump = value elif whichop == \"Jump\": self.Compare", "if whichop == \"Compare\": 
self.Jump = value elif whichop == \"Jump\": self.Compare =", "if whichop == op: setattr(self, whichop, value) if whichop == \"Compare\": self.Jump =", "= value elif whichop == \"AggStep\": self.AggFinal = value elif whichop == \"AggFinal\":", "True) class OpcodeStatus(object): _immutable_fields_ = [\"frozen\", \"use_flag_cache\"] + dual_implementation_opcodes def __init__(self, use_flag_cache): self.use_flag_cache", "'MustBeInt', 'Ne_Eq_Gt_Le_Lt_Ge', 'Next', 'NextIfOpen', 'NotExists', 'NotNull', 'Null', 'Once', 'OpenRead_OpenWrite', 'Real', 'RealAffinity', 'ResultRow', 'Return',", "in unrolling_dual_implementation_opcodes: setattr(self, op, getattr(OpcodeDefaults, op)) def set_use_translated(self, op, value): if self.frozen: raise", "self.use_flag_cache = use_flag_cache self.frozen = False for op in unrolling_dual_implementation_opcodes: setattr(self, op, getattr(OpcodeDefaults,", "class OpcodeStatus(object): _immutable_fields_ = [\"frozen\", \"use_flag_cache\"] + dual_implementation_opcodes def __init__(self, use_flag_cache): self.use_flag_cache =", "OpenRead_OpenWrite = False Cast = False OpcodeDefaults = OpcodeDefaults() for op in dual_implementation_opcodes:", "'IdxLE_IdxGT_IdxLT_IdxGE', 'IdxRowid', 'IfPos', 'IfZero', 'If_IfNot', 'InitCoroutine', 'Integer', 'IsNull', 'Jump', 'MakeRecord', 'Move', 'MustBeInt', 'Ne_Eq_Gt_Le_Lt_Ge',", "= value def freeze(self): if not self.frozen: self.frozen = True def disable_from_cmdline(self, s):", "def __init__(self, use_flag_cache): self.use_flag_cache = use_flag_cache self.frozen = False for op in unrolling_dual_implementation_opcodes:", "unrolling_dual_implementation_opcodes: setattr(self, op, False) return specs = s.split(\":\") for spec in specs: if", "TypeError(\"too late to change\") if self.use_flag_cache: raise TypeError(\"can't change if flag cache is", "'MakeRecord', 'Move', 'MustBeInt', 'Ne_Eq_Gt_Le_Lt_Ge', 'Next', 'NextIfOpen', 'NotExists', 'NotNull', 'Null', 'Once', 'OpenRead_OpenWrite', 'Real', 
'RealAffinity',", "op, False) return specs = s.split(\":\") for spec in specs: if spec: self.set_use_translated(spec,", "flag cache is used\") for whichop in unrolling_dual_implementation_opcodes: if whichop == op: setattr(self,", "'CollSeq', 'Compare', 'Copy', 'EndCoroutine', 'Function', 'Gosub', 'Goto', 'IdxLE_IdxGT_IdxLT_IdxGE', 'IdxRowid', 'IfPos', 'IfZero', 'If_IfNot', 'InitCoroutine',", "'Move', 'MustBeInt', 'Ne_Eq_Gt_Le_Lt_Ge', 'Next', 'NextIfOpen', 'NotExists', 'NotNull', 'Null', 'Once', 'OpenRead_OpenWrite', 'Real', 'RealAffinity', 'ResultRow',", "self.frozen = False for op in unrolling_dual_implementation_opcodes: setattr(self, op, getattr(OpcodeDefaults, op)) def set_use_translated(self,", "TypeError(\"can't change if flag cache is used\") for whichop in unrolling_dual_implementation_opcodes: if whichop", "= value elif whichop == \"AggFinal\": self.AggStep = value def freeze(self): if not", "op: setattr(self, whichop, value) if whichop == \"Compare\": self.Jump = value elif whichop", "'RealAffinity', 'ResultRow', 'Return', 'SCopy', 'Seek', 'SeekLT_SeekLE_SeekGE_SeekGT', 'Sequence', 'Variable', 'Yield', ] unrolling_dual_implementation_opcodes = unrolling_iterable(dual_implementation_opcodes)", "def disable_from_cmdline(self, s): if s == \"all\": for op in unrolling_dual_implementation_opcodes: setattr(self, op,", "self.AggStep = value def freeze(self): if not self.frozen: self.frozen = True def disable_from_cmdline(self,", "change\") if self.use_flag_cache: raise TypeError(\"can't change if flag cache is used\") for whichop", "if s == \"all\": for op in unrolling_dual_implementation_opcodes: setattr(self, op, False) return specs", "setattr(OpcodeDefaults, op, True) class OpcodeStatus(object): _immutable_fields_ = [\"frozen\", \"use_flag_cache\"] + dual_implementation_opcodes def __init__(self,", "if self.frozen: raise TypeError(\"too late to change\") if self.use_flag_cache: raise TypeError(\"can't change if", "False Cast = False OpcodeDefaults = 
OpcodeDefaults() for op in dual_implementation_opcodes: if not", "\"Jump\": self.Compare = value elif whichop == \"AggStep\": self.AggFinal = value elif whichop", "whichop == \"AggStep\": self.AggFinal = value elif whichop == \"AggFinal\": self.AggStep = value", "disable_from_cmdline(self, s): if s == \"all\": for op in unrolling_dual_implementation_opcodes: setattr(self, op, False)", "'Return', 'SCopy', 'Seek', 'SeekLT_SeekLE_SeekGE_SeekGT', 'Sequence', 'Variable', 'Yield', ] unrolling_dual_implementation_opcodes = unrolling_iterable(dual_implementation_opcodes) class OpcodeDefaults(object):", "use_flag_cache): self.use_flag_cache = use_flag_cache self.frozen = False for op in unrolling_dual_implementation_opcodes: setattr(self, op,", "for op in dual_implementation_opcodes: if not hasattr(OpcodeDefaults, op): setattr(OpcodeDefaults, op, True) class OpcodeStatus(object):", "if not hasattr(OpcodeDefaults, op): setattr(OpcodeDefaults, op, True) class OpcodeStatus(object): _immutable_fields_ = [\"frozen\", \"use_flag_cache\"]", "== \"AggFinal\": self.AggStep = value def freeze(self): if not self.frozen: self.frozen = True", "unrolling_dual_implementation_opcodes: setattr(self, op, getattr(OpcodeDefaults, op)) def set_use_translated(self, op, value): if self.frozen: raise TypeError(\"too", "'Goto', 'IdxLE_IdxGT_IdxLT_IdxGE', 'IdxRowid', 'IfPos', 'IfZero', 'If_IfNot', 'InitCoroutine', 'Integer', 'IsNull', 'Jump', 'MakeRecord', 'Move', 'MustBeInt',", "'SCopy', 'Seek', 'SeekLT_SeekLE_SeekGE_SeekGT', 'Sequence', 'Variable', 'Yield', ] unrolling_dual_implementation_opcodes = unrolling_iterable(dual_implementation_opcodes) class OpcodeDefaults(object): OpenRead_OpenWrite", "= unrolling_iterable(dual_implementation_opcodes) class OpcodeDefaults(object): OpenRead_OpenWrite = False Cast = False OpcodeDefaults = OpcodeDefaults()", "'Yield', ] unrolling_dual_implementation_opcodes = unrolling_iterable(dual_implementation_opcodes) class OpcodeDefaults(object): OpenRead_OpenWrite = 
False Cast = False", "def set_use_translated(self, op, value): if self.frozen: raise TypeError(\"too late to change\") if self.use_flag_cache:", "'OpenRead_OpenWrite', 'Real', 'RealAffinity', 'ResultRow', 'Return', 'SCopy', 'Seek', 'SeekLT_SeekLE_SeekGE_SeekGT', 'Sequence', 'Variable', 'Yield', ] unrolling_dual_implementation_opcodes", "self.Compare = value elif whichop == \"AggStep\": self.AggFinal = value elif whichop ==", "raise TypeError(\"too late to change\") if self.use_flag_cache: raise TypeError(\"can't change if flag cache", "freeze(self): if not self.frozen: self.frozen = True def disable_from_cmdline(self, s): if s ==" ]
[ "an ID, skip if there is not one try: od['Name'] = record['Cities']['GEO.display-label'] except", "try: od['Name'] = record['Cities']['GEO.display-label'] except KeyError: pass cities_list.append(od) with open(\"output.json\", \"w\") as f:", "key_File = open(\"../resources/key\") BASE_ID = keyFile.readline().rstrip() # found in url of API documentation", "record['Cities']['GEO.id'] except KeyError: continue # every record needs an ID, skip if there", "pip install airtable # pip install airtable-python-wrapper import json import airtable from airtable", "def create_cities_object(): return Airtable(BASE_ID, CITIES_TABLE) def airtable_call(): airtable_object = Airtable(BASE_ID, CITIES_TABLE) records =", "CITIES_TABLE = \"USCities\" def create_cities_object(): return Airtable(BASE_ID, CITIES_TABLE) def airtable_call(): airtable_object = Airtable(BASE_ID,", "every record needs an ID, skip if there is not one try: od['Name']", "# requires # pip install airtable # pip install airtable-python-wrapper import json import", "{'Id': []} # Original Dictionary try: od['Id'] = record['Cities']['GEO.id'] except KeyError: continue #", "skip if there is not one try: od['Name'] = record['Cities']['GEO.display-label'] except KeyError: pass", "url of API documentation for table CITIES_TABLE = \"USCities\" def create_cities_object(): return Airtable(BASE_ID,", "BASE_ID = keyFile.readline().rstrip() # found in url of API documentation for table CITIES_TABLE", "= keyFile.readline().rstrip() # found in url of API documentation for table CITIES_TABLE =", "except KeyError: continue # every record needs an ID, skip if there is", "def airtable_call(): airtable_object = Airtable(BASE_ID, CITIES_TABLE) records = airtable_object.get_all() cities_list = [] for", "requires # pip install airtable # pip install airtable-python-wrapper import json import airtable", "airtable_object.get_all() cities_list = [] for record in records: od = {'Id': []} #", "<filename>src/cities.py # requires # pip install 
airtable # pip install airtable-python-wrapper import json", "od = {'Id': []} # Original Dictionary try: od['Id'] = record['Cities']['GEO.id'] except KeyError:", "= \"USCities\" def create_cities_object(): return Airtable(BASE_ID, CITIES_TABLE) def airtable_call(): airtable_object = Airtable(BASE_ID, CITIES_TABLE)", "import json import airtable from airtable import Airtable key_File = open(\"../resources/key\") BASE_ID =", "except KeyError: pass cities_list.append(od) with open(\"output.json\", \"w\") as f: json.dump(service_list, f, indent=2) return", "= open(\"../resources/key\") BASE_ID = keyFile.readline().rstrip() # found in url of API documentation for", "Airtable(BASE_ID, CITIES_TABLE) def airtable_call(): airtable_object = Airtable(BASE_ID, CITIES_TABLE) records = airtable_object.get_all() cities_list =", "# Original Dictionary try: od['Id'] = record['Cities']['GEO.id'] except KeyError: continue # every record", "found in url of API documentation for table CITIES_TABLE = \"USCities\" def create_cities_object():", "record in records: od = {'Id': []} # Original Dictionary try: od['Id'] =", "= record['Cities']['GEO.id'] except KeyError: continue # every record needs an ID, skip if", "od['Id'] = record['Cities']['GEO.id'] except KeyError: continue # every record needs an ID, skip", "# pip install airtable # pip install airtable-python-wrapper import json import airtable from", "airtable from airtable import Airtable key_File = open(\"../resources/key\") BASE_ID = keyFile.readline().rstrip() # found", "from airtable import Airtable key_File = open(\"../resources/key\") BASE_ID = keyFile.readline().rstrip() # found in", "install airtable # pip install airtable-python-wrapper import json import airtable from airtable import", "cities_list = [] for record in records: od = {'Id': []} # Original", "record['Cities']['GEO.display-label'] except KeyError: pass cities_list.append(od) with open(\"output.json\", \"w\") as f: json.dump(service_list, f, indent=2)", "if there 
is not one try: od['Name'] = record['Cities']['GEO.display-label'] except KeyError: pass cities_list.append(od)", "CITIES_TABLE) records = airtable_object.get_all() cities_list = [] for record in records: od =", "return Airtable(BASE_ID, CITIES_TABLE) def airtable_call(): airtable_object = Airtable(BASE_ID, CITIES_TABLE) records = airtable_object.get_all() cities_list", "CITIES_TABLE) def airtable_call(): airtable_object = Airtable(BASE_ID, CITIES_TABLE) records = airtable_object.get_all() cities_list = []", "in url of API documentation for table CITIES_TABLE = \"USCities\" def create_cities_object(): return", "airtable # pip install airtable-python-wrapper import json import airtable from airtable import Airtable", "record needs an ID, skip if there is not one try: od['Name'] =", "one try: od['Name'] = record['Cities']['GEO.display-label'] except KeyError: pass cities_list.append(od) with open(\"output.json\", \"w\") as", "try: od['Id'] = record['Cities']['GEO.id'] except KeyError: continue # every record needs an ID,", "records: od = {'Id': []} # Original Dictionary try: od['Id'] = record['Cities']['GEO.id'] except", "= Airtable(BASE_ID, CITIES_TABLE) records = airtable_object.get_all() cities_list = [] for record in records:", "Airtable(BASE_ID, CITIES_TABLE) records = airtable_object.get_all() cities_list = [] for record in records: od", "for record in records: od = {'Id': []} # Original Dictionary try: od['Id']", "there is not one try: od['Name'] = record['Cities']['GEO.display-label'] except KeyError: pass cities_list.append(od) with", "# found in url of API documentation for table CITIES_TABLE = \"USCities\" def", "for table CITIES_TABLE = \"USCities\" def create_cities_object(): return Airtable(BASE_ID, CITIES_TABLE) def airtable_call(): airtable_object", "KeyError: continue # every record needs an ID, skip if there is not", "create_cities_object(): return Airtable(BASE_ID, CITIES_TABLE) def airtable_call(): airtable_object = Airtable(BASE_ID, CITIES_TABLE) 
records = airtable_object.get_all()", "= airtable_object.get_all() cities_list = [] for record in records: od = {'Id': []}", "= {'Id': []} # Original Dictionary try: od['Id'] = record['Cities']['GEO.id'] except KeyError: continue", "API documentation for table CITIES_TABLE = \"USCities\" def create_cities_object(): return Airtable(BASE_ID, CITIES_TABLE) def", "[]} # Original Dictionary try: od['Id'] = record['Cities']['GEO.id'] except KeyError: continue # every", "= [] for record in records: od = {'Id': []} # Original Dictionary", "pip install airtable-python-wrapper import json import airtable from airtable import Airtable key_File =", "\"USCities\" def create_cities_object(): return Airtable(BASE_ID, CITIES_TABLE) def airtable_call(): airtable_object = Airtable(BASE_ID, CITIES_TABLE) records", "[] for record in records: od = {'Id': []} # Original Dictionary try:", "airtable-python-wrapper import json import airtable from airtable import Airtable key_File = open(\"../resources/key\") BASE_ID", "is not one try: od['Name'] = record['Cities']['GEO.display-label'] except KeyError: pass cities_list.append(od) with open(\"output.json\",", "import airtable from airtable import Airtable key_File = open(\"../resources/key\") BASE_ID = keyFile.readline().rstrip() #", "airtable_call(): airtable_object = Airtable(BASE_ID, CITIES_TABLE) records = airtable_object.get_all() cities_list = [] for record", "open(\"../resources/key\") BASE_ID = keyFile.readline().rstrip() # found in url of API documentation for table", "= record['Cities']['GEO.display-label'] except KeyError: pass cities_list.append(od) with open(\"output.json\", \"w\") as f: json.dump(service_list, f,", "json import airtable from airtable import Airtable key_File = open(\"../resources/key\") BASE_ID = keyFile.readline().rstrip()", "table CITIES_TABLE = \"USCities\" def create_cities_object(): return Airtable(BASE_ID, CITIES_TABLE) def airtable_call(): airtable_object =", "Dictionary try: od['Id'] = 
record['Cities']['GEO.id'] except KeyError: continue # every record needs an", "od['Name'] = record['Cities']['GEO.display-label'] except KeyError: pass cities_list.append(od) with open(\"output.json\", \"w\") as f: json.dump(service_list,", "KeyError: pass cities_list.append(od) with open(\"output.json\", \"w\") as f: json.dump(service_list, f, indent=2) return cities_list", "Original Dictionary try: od['Id'] = record['Cities']['GEO.id'] except KeyError: continue # every record needs", "not one try: od['Name'] = record['Cities']['GEO.display-label'] except KeyError: pass cities_list.append(od) with open(\"output.json\", \"w\")", "keyFile.readline().rstrip() # found in url of API documentation for table CITIES_TABLE = \"USCities\"", "airtable_object = Airtable(BASE_ID, CITIES_TABLE) records = airtable_object.get_all() cities_list = [] for record in", "airtable import Airtable key_File = open(\"../resources/key\") BASE_ID = keyFile.readline().rstrip() # found in url", "in records: od = {'Id': []} # Original Dictionary try: od['Id'] = record['Cities']['GEO.id']", "install airtable-python-wrapper import json import airtable from airtable import Airtable key_File = open(\"../resources/key\")", "ID, skip if there is not one try: od['Name'] = record['Cities']['GEO.display-label'] except KeyError:", "of API documentation for table CITIES_TABLE = \"USCities\" def create_cities_object(): return Airtable(BASE_ID, CITIES_TABLE)", "import Airtable key_File = open(\"../resources/key\") BASE_ID = keyFile.readline().rstrip() # found in url of", "# every record needs an ID, skip if there is not one try:", "continue # every record needs an ID, skip if there is not one", "Airtable key_File = open(\"../resources/key\") BASE_ID = keyFile.readline().rstrip() # found in url of API", "needs an ID, skip if there is not one try: od['Name'] = record['Cities']['GEO.display-label']", "# pip install airtable-python-wrapper import json import airtable from airtable import Airtable key_File", 
"documentation for table CITIES_TABLE = \"USCities\" def create_cities_object(): return Airtable(BASE_ID, CITIES_TABLE) def airtable_call():", "records = airtable_object.get_all() cities_list = [] for record in records: od = {'Id':" ]
[ "the sums of hits against all their articles from article_hits database view\"\"\" \"\"\"Example", "from executed command and formats for report viewing\"\"\" print \"\\r\\nOn which days did", "popular authors of all time.\"\"\" popular_authors() \"\"\"Generate a report of the day(s) with", "SUM( article_hits.hits ) FROM authors, articles, article_hits WHERE article_hits.article_title = articles.title AND authors.id", "= \"news\" TOP_ARTICLES = \"3\" def get_article_counts(parameters): \"\"\"Return all posts from the 'database',", "to errors?\\r\\n\" for row in c.fetchall(): date = row[0] error_percent = 100 -", "connection\"\"\" db.close() def error_report(): \"\"\"Return all posts from the 'database', most recent first.\"\"\"", "\"\"\"SELECT to_char(date, 'FMMonth FMDD, FMYYYY') as day, percent_good FROM errors WHERE percent_good <", "* FROM article_hits limit (%s);''' data = (bleach.clean(parameters),) \"\"\"Execute sends command to the", "command to the database\"\"\" c.execute(sql) \"\"\"Return all info from executed command & formats", "the database to perform operations\"\"\" c = db.cursor() \"\"\"Pull correlated data of authors", "database to perform operations\"\"\" c = db.cursor() \"\"\"Pull article titles and hit counts", "c = db.cursor() \"\"\"Pull article titles and hit counts from article_hits database view\"\"\"", "authors.name, SUM( article_hits.hits ) FROM authors, articles, article_hits WHERE article_hits.article_title = articles.title AND", "from errors view of database with more than 1% error\"\"\" \"\"\"Ex. 
- ('July", "perform operations\"\"\" c = db.cursor() \"\"\"Pull correlated data of authors and the sums", "+ \"\"\" views\"\"\" \"\"\"Closes database connection\"\"\" db.close() def error_report(): \"\"\"Return all posts from", "the most popular 3 articles of all time.\"\"\" get_article_counts(TOP_ARTICLES) \"\"\"Generate a report of", "operations\"\"\" c = db.cursor() \"\"\"Pull correlated data of authors and the sums of", "\"\"\" + str(error_percent) + \"\"\"% errors\"\"\" \"\"\"Closes database connection\"\"\" db.close() def report(): \"\"\"Generate", "to the database\"\"\" c.execute(sql, data) \"\"\"Returns all information from executed command\"\"\" print \"\"\"\\r\\nWhat", "\"\"\"Pull days from errors view of database with more than 1% error\"\"\" \"\"\"Ex.", "popular article authors of all time?\\r\\n\"\"\" for row in c.fetchall(): author = row[0]", "of requests lead to errors?\\r\\n\" for row in c.fetchall(): date = row[0] error_percent", "recent first.\"\"\" \"\"\"Open PostGRES database session and returns a new connection instance\"\"\" db", "articles.author GROUP BY authors.name ORDER BY sum desc;\"\"\" \"\"\"Execute sends command to the", "articles from article_hits database view\"\"\" \"\"\"Example - ('<NAME>', Decimal('507594'))\"\"\" sql = \"\"\"SELECT authors.name,", "db.close() def popular_authors(): \"\"\"Return all posts from the 'database', most recent first.\"\"\" \"\"\"Open", "str(error_percent) + \"\"\"% errors\"\"\" \"\"\"Closes database connection\"\"\" db.close() def report(): \"\"\"Generate a report", "which days did more than 1% of requests lead to errors?\\r\\n\" for row", "days from errors view of database with more than 1% error\"\"\" \"\"\"Ex. 
-", "print '''\"''' + str(title) + '''\" - ''' + str(count) + \"\"\" views\"\"\"", "c = db.cursor() \"\"\"Pull correlated data of authors and the sums of hits", "data of authors and the sums of hits against all their articles from", "python \"\"\"Database article and errors report generator.\"\"\" import datetime import psycopg2 import bleach", "days did more than 1% of requests lead to errors?\\r\\n\" for row in", "sql = '''SELECT * FROM article_hits limit (%s);''' data = (bleach.clean(parameters),) \"\"\"Execute sends", "import psycopg2 import bleach DBNAME = \"news\" TOP_ARTICLES = \"3\" def get_article_counts(parameters): \"\"\"Return", "instance\"\"\" db = psycopg2.connect(database=DBNAME) \"\"\"Sets up interaction with the database to perform operations\"\"\"", "get_article_counts(parameters): \"\"\"Return all posts from the 'database', most recent first.\"\"\" \"\"\"Open PostGRES database", "c.fetchall(): author = row[0] count = row[1] print str(author) + \"\"\" - \"\"\"", "psycopg2.connect(database=DBNAME) \"\"\"Sets up interaction with the database to perform operations\"\"\" c = db.cursor()", "FROM article_hits limit (%s);''' data = (bleach.clean(parameters),) \"\"\"Execute sends command to the database\"\"\"", "- ('<NAME>', Decimal('507594'))\"\"\" sql = \"\"\"SELECT authors.name, SUM( article_hits.hits ) FROM authors, articles,", "def get_article_counts(parameters): \"\"\"Return all posts from the 'database', most recent first.\"\"\" \"\"\"Open PostGRES", "are the most popular three articles of all time?\\r\\n\"\"\" for row in c.fetchall():", "bleach DBNAME = \"news\" TOP_ARTICLES = \"3\" def get_article_counts(parameters): \"\"\"Return all posts from", "time?\\r\\n\"\"\" for row in c.fetchall(): author = row[0] count = row[1] print str(author)", "articles of all time.\"\"\" get_article_counts(TOP_ARTICLES) \"\"\"Generate a report of the most popular authors", "row[0] count = row[1] print '''\"''' + str(title) + '''\" - ''' +", "of authors and the sums 
of hits against all their articles from article_hits", "row[0] error_percent = 100 - row[1] print str(date) + \"\"\" - \"\"\" +", "DBNAME = \"news\" TOP_ARTICLES = \"3\" def get_article_counts(parameters): \"\"\"Return all posts from the", "(bleach.clean(parameters),) \"\"\"Execute sends command to the database\"\"\" c.execute(sql, data) \"\"\"Returns all information from", "c = db.cursor() \"\"\"Pull days from errors view of database with more than", "\"\"\" + str(count) + \"\"\" views\"\"\" \"\"\"Closes database connection\"\"\" db.close() def error_report(): \"\"\"Return", "view\"\"\" \"\"\"Example - ('<NAME>', Decimal('507594'))\"\"\" sql = \"\"\"SELECT authors.name, SUM( article_hits.hits ) FROM", "- ('Candidate is jerk, alleges rival', 338647L)\"\"\" sql = '''SELECT * FROM article_hits", "date;\"\"\" \"\"\"Execute sends command to the database\"\"\" c.execute(sql) \"\"\"Returns all info from executed", "PostGRES database session and returns a new connection instance\"\"\" db = psycopg2.connect(database=DBNAME) \"\"\"Sets", "all their articles from article_hits database view\"\"\" \"\"\"Example - ('<NAME>', Decimal('507594'))\"\"\" sql =", "BY sum desc;\"\"\" \"\"\"Execute sends command to the database\"\"\" c.execute(sql) \"\"\"Return all info", "\"\"\"Generate a report of the most popular 3 articles of all time.\"\"\" get_article_counts(TOP_ARTICLES)", "of the most popular authors of all time.\"\"\" popular_authors() \"\"\"Generate a report of", "command to the database\"\"\" c.execute(sql) \"\"\"Returns all info from executed command and formats", "TOP_ARTICLES = \"3\" def get_article_counts(parameters): \"\"\"Return all posts from the 'database', most recent", "command & formats for report view\"\"\" print \"\"\"\\r\\nWho are the most popular article", "\"\"\" views\"\"\" \"\"\"Closes database connection\"\"\" db.close() def popular_authors(): \"\"\"Return all posts from the", "1% of requests lead to errors?\\r\\n\" for row in c.fetchall(): date = row[0]", 
"view of database with more than 1% error\"\"\" \"\"\"Ex. - ('July 17, 2016',", "article titles and hit counts from article_hits database view\"\"\" \"\"\"Example - ('Candidate is", "= row[1] print str(author) + \"\"\" - \"\"\" + str(count) + \"\"\" views\"\"\"", "\"\"\"Execute sends command to the database\"\"\" c.execute(sql) \"\"\"Returns all info from executed command", "+ \"\"\" - \"\"\" + str(count) + \"\"\" views\"\"\" \"\"\"Closes database connection\"\"\" db.close()", "up interaction with the database to perform operations\"\"\" c = db.cursor() \"\"\"Pull correlated", "from executed command\"\"\" print \"\"\"\\r\\nWhat are the most popular three articles of all", "titles and hit counts from article_hits database view\"\"\" \"\"\"Example - ('Candidate is jerk,", "command\"\"\" print \"\"\"\\r\\nWhat are the most popular three articles of all time?\\r\\n\"\"\" for", "c.fetchall(): date = row[0] error_percent = 100 - row[1] print str(date) + \"\"\"", "of all time?\\r\\n\"\"\" for row in c.fetchall(): author = row[0] count = row[1]", "BY date;\"\"\" \"\"\"Execute sends command to the database\"\"\" c.execute(sql) \"\"\"Returns all info from", "the database to perform operations\"\"\" c = db.cursor() \"\"\"Pull days from errors view", "most popular three articles of all time?\\r\\n\"\"\" for row in c.fetchall(): title =", "+ str(title) + '''\" - ''' + str(count) + \"\"\" views\"\"\" \"\"\"Closes database", "articles, article_hits WHERE article_hits.article_title = articles.title AND authors.id = articles.author GROUP BY authors.name", "all info from executed command & formats for report view\"\"\" print \"\"\"\\r\\nWho are", "count = row[1] print str(author) + \"\"\" - \"\"\" + str(count) + \"\"\"", "all info from executed command and formats for report viewing\"\"\" print \"\\r\\nOn which", "sql = \"\"\"SELECT to_char(date, 'FMMonth FMDD, FMYYYY') as day, percent_good FROM errors WHERE", "popular_authors() \"\"\"Generate a report of the day(s) with more than 
1% errors requests.\"\"\"", "= '''SELECT * FROM article_hits limit (%s);''' data = (bleach.clean(parameters),) \"\"\"Execute sends command", "datetime import psycopg2 import bleach DBNAME = \"news\" TOP_ARTICLES = \"3\" def get_article_counts(parameters):", "'FMMonth FMDD, FMYYYY') as day, percent_good FROM errors WHERE percent_good < '99' ORDER", "for row in c.fetchall(): title = row[0] count = row[1] print '''\"''' +", "popular three articles of all time?\\r\\n\"\"\" for row in c.fetchall(): title = row[0]", "\"\"\" - \"\"\" + str(error_percent) + \"\"\"% errors\"\"\" \"\"\"Closes database connection\"\"\" db.close() def", "percent_good < '99' ORDER BY date;\"\"\" \"\"\"Execute sends command to the database\"\"\" c.execute(sql)", "- \"\"\" + str(error_percent) + \"\"\"% errors\"\"\" \"\"\"Closes database connection\"\"\" db.close() def report():", "perform operations\"\"\" c = db.cursor() \"\"\"Pull article titles and hit counts from article_hits", "- \"\"\" + str(count) + \"\"\" views\"\"\" \"\"\"Closes database connection\"\"\" db.close() def error_report():", "get_article_counts(TOP_ARTICLES) \"\"\"Generate a report of the most popular authors of all time.\"\"\" popular_authors()", "their articles from article_hits database view\"\"\" \"\"\"Example - ('<NAME>', Decimal('507594'))\"\"\" sql = \"\"\"SELECT", "with the database to perform operations\"\"\" c = db.cursor() \"\"\"Pull correlated data of", "row[1] print str(date) + \"\"\" - \"\"\" + str(error_percent) + \"\"\"% errors\"\"\" \"\"\"Closes", "of the most popular 3 articles of all time.\"\"\" get_article_counts(TOP_ARTICLES) \"\"\"Generate a report", "AND authors.id = articles.author GROUP BY authors.name ORDER BY sum desc;\"\"\" \"\"\"Execute sends", "for row in c.fetchall(): date = row[0] error_percent = 100 - row[1] print", "\"\"\"% errors\"\"\" \"\"\"Closes database connection\"\"\" db.close() def report(): \"\"\"Generate a report of the", "operations\"\"\" c = db.cursor() \"\"\"Pull days from errors 
view of database with more", "operations\"\"\" c = db.cursor() \"\"\"Pull article titles and hit counts from article_hits database", "database session and returns a new connection instance\"\"\" db = psycopg2.connect(database=DBNAME) \"\"\"Sets up", "= row[0] count = row[1] print '''\"''' + str(title) + '''\" - '''", "338647L)\"\"\" sql = '''SELECT * FROM article_hits limit (%s);''' data = (bleach.clean(parameters),) \"\"\"Execute", "jerk, alleges rival', 338647L)\"\"\" sql = '''SELECT * FROM article_hits limit (%s);''' data", "BY authors.name ORDER BY sum desc;\"\"\" \"\"\"Execute sends command to the database\"\"\" c.execute(sql)", "import datetime import psycopg2 import bleach DBNAME = \"news\" TOP_ARTICLES = \"3\" def", "data = (bleach.clean(parameters),) \"\"\"Execute sends command to the database\"\"\" c.execute(sql, data) \"\"\"Returns all", "print \"\\r\\nOn which days did more than 1% of requests lead to errors?\\r\\n\"", "is jerk, alleges rival', 338647L)\"\"\" sql = '''SELECT * FROM article_hits limit (%s);'''", "\"\"\"Return all posts from the 'database', most recent first.\"\"\" \"\"\"Open PostGRES database session", "('July 17, 2016', Decimal('97.7'))\"\"\" sql = \"\"\"SELECT to_char(date, 'FMMonth FMDD, FMYYYY') as day,", "WHERE article_hits.article_title = articles.title AND authors.id = articles.author GROUP BY authors.name ORDER BY", "article_hits database view\"\"\" \"\"\"Example - ('<NAME>', Decimal('507594'))\"\"\" sql = \"\"\"SELECT authors.name, SUM( article_hits.hits", "2016', Decimal('97.7'))\"\"\" sql = \"\"\"SELECT to_char(date, 'FMMonth FMDD, FMYYYY') as day, percent_good FROM", "desc;\"\"\" \"\"\"Execute sends command to the database\"\"\" c.execute(sql) \"\"\"Return all info from executed", "'''SELECT * FROM article_hits limit (%s);''' data = (bleach.clean(parameters),) \"\"\"Execute sends command to", "+ str(error_percent) + \"\"\"% errors\"\"\" \"\"\"Closes database connection\"\"\" db.close() def report(): \"\"\"Generate a", 
"c.fetchall(): title = row[0] count = row[1] print '''\"''' + str(title) + '''\"", "a report of the most popular authors of all time.\"\"\" popular_authors() \"\"\"Generate a", "\"\"\"Execute sends command to the database\"\"\" c.execute(sql, data) \"\"\"Returns all information from executed", "all information from executed command\"\"\" print \"\"\"\\r\\nWhat are the most popular three articles", "a report of the day(s) with more than 1% errors requests.\"\"\" error_report() report()", "<gh_stars>0 #!/usr/bin/env python \"\"\"Database article and errors report generator.\"\"\" import datetime import psycopg2", "title = row[0] count = row[1] print '''\"''' + str(title) + '''\" -", "database to perform operations\"\"\" c = db.cursor() \"\"\"Pull days from errors view of", "- ('July 17, 2016', Decimal('97.7'))\"\"\" sql = \"\"\"SELECT to_char(date, 'FMMonth FMDD, FMYYYY') as", "+ str(count) + \"\"\" views\"\"\" \"\"\"Closes database connection\"\"\" db.close() def popular_authors(): \"\"\"Return all", "str(title) + '''\" - ''' + str(count) + \"\"\" views\"\"\" \"\"\"Closes database connection\"\"\"", "+ str(count) + \"\"\" views\"\"\" \"\"\"Closes database connection\"\"\" db.close() def error_report(): \"\"\"Return all", "views\"\"\" \"\"\"Closes database connection\"\"\" db.close() def error_report(): \"\"\"Return all posts from the 'database',", "with the database to perform operations\"\"\" c = db.cursor() \"\"\"Pull article titles and", "popular_authors(): \"\"\"Return all posts from the 'database', most recent first.\"\"\" \"\"\"Open PostGRES database", "in c.fetchall(): author = row[0] count = row[1] print str(author) + \"\"\" -", "FROM authors, articles, article_hits WHERE article_hits.article_title = articles.title AND authors.id = articles.author GROUP", "\"\"\"\\r\\nWhat are the most popular three articles of all time?\\r\\n\"\"\" for row in", "all time.\"\"\" get_article_counts(TOP_ARTICLES) \"\"\"Generate a report of the most popular authors of all", 
"row in c.fetchall(): date = row[0] error_percent = 100 - row[1] print str(date)", "row[1] print str(author) + \"\"\" - \"\"\" + str(count) + \"\"\" views\"\"\" \"\"\"Closes", "time.\"\"\" get_article_counts(TOP_ARTICLES) \"\"\"Generate a report of the most popular authors of all time.\"\"\"", "formats for report viewing\"\"\" print \"\\r\\nOn which days did more than 1% of", "generator.\"\"\" import datetime import psycopg2 import bleach DBNAME = \"news\" TOP_ARTICLES = \"3\"", "percent_good FROM errors WHERE percent_good < '99' ORDER BY date;\"\"\" \"\"\"Execute sends command", "article_hits database view\"\"\" \"\"\"Example - ('Candidate is jerk, alleges rival', 338647L)\"\"\" sql =", "= articles.author GROUP BY authors.name ORDER BY sum desc;\"\"\" \"\"\"Execute sends command to", "authors, articles, article_hits WHERE article_hits.article_title = articles.title AND authors.id = articles.author GROUP BY", "FMDD, FMYYYY') as day, percent_good FROM errors WHERE percent_good < '99' ORDER BY", "row[1] print '''\"''' + str(title) + '''\" - ''' + str(count) + \"\"\"", "to perform operations\"\"\" c = db.cursor() \"\"\"Pull days from errors view of database", "count = row[1] print '''\"''' + str(title) + '''\" - ''' + str(count)", "popular 3 articles of all time.\"\"\" get_article_counts(TOP_ARTICLES) \"\"\"Generate a report of the most", "than 1% error\"\"\" \"\"\"Ex. 
- ('July 17, 2016', Decimal('97.7'))\"\"\" sql = \"\"\"SELECT to_char(date,", "most recent first.\"\"\" \"\"\"Open PostGRES database session and returns a new connection instance\"\"\"", "db.close() def error_report(): \"\"\"Return all posts from the 'database', most recent first.\"\"\" \"\"\"Open", "authors of all time.\"\"\" popular_authors() \"\"\"Generate a report of the day(s) with more", "time?\\r\\n\"\"\" for row in c.fetchall(): title = row[0] count = row[1] print '''\"'''", "'database', most recent first.\"\"\" \"\"\"Open PostGRES database session and returns a new connection", "author = row[0] count = row[1] print str(author) + \"\"\" - \"\"\" +", "rival', 338647L)\"\"\" sql = '''SELECT * FROM article_hits limit (%s);''' data = (bleach.clean(parameters),)", "articles of all time?\\r\\n\"\"\" for row in c.fetchall(): title = row[0] count =", "all time?\\r\\n\"\"\" for row in c.fetchall(): title = row[0] count = row[1] print", "\"\"\"Returns all information from executed command\"\"\" print \"\"\"\\r\\nWhat are the most popular three", "errors WHERE percent_good < '99' ORDER BY date;\"\"\" \"\"\"Execute sends command to the", "= db.cursor() \"\"\"Pull article titles and hit counts from article_hits database view\"\"\" \"\"\"Example", "command to the database\"\"\" c.execute(sql, data) \"\"\"Returns all information from executed command\"\"\" print", "and hit counts from article_hits database view\"\"\" \"\"\"Example - ('Candidate is jerk, alleges", "WHERE percent_good < '99' ORDER BY date;\"\"\" \"\"\"Execute sends command to the database\"\"\"", "errors report generator.\"\"\" import datetime import psycopg2 import bleach DBNAME = \"news\" TOP_ARTICLES", "connection\"\"\" db.close() def report(): \"\"\"Generate a report of the most popular 3 articles", "\"\"\"Pull correlated data of authors and the sums of hits against all their", "most popular article authors of all time?\\r\\n\"\"\" for row in c.fetchall(): author =", "= \"3\" def 
get_article_counts(parameters): \"\"\"Return all posts from the 'database', most recent first.\"\"\"", "sends command to the database\"\"\" c.execute(sql, data) \"\"\"Returns all information from executed command\"\"\"", "article authors of all time?\\r\\n\"\"\" for row in c.fetchall(): author = row[0] count", "posts from the 'database', most recent first.\"\"\" \"\"\"Open PostGRES database session and returns", "= (bleach.clean(parameters),) \"\"\"Execute sends command to the database\"\"\" c.execute(sql, data) \"\"\"Returns all information", "db = psycopg2.connect(database=DBNAME) \"\"\"Sets up interaction with the database to perform operations\"\"\" c", "100 - row[1] print str(date) + \"\"\" - \"\"\" + str(error_percent) + \"\"\"%", "of all time.\"\"\" popular_authors() \"\"\"Generate a report of the day(s) with more than", "database connection\"\"\" db.close() def error_report(): \"\"\"Return all posts from the 'database', most recent", "info from executed command and formats for report viewing\"\"\" print \"\\r\\nOn which days", "('Candidate is jerk, alleges rival', 338647L)\"\"\" sql = '''SELECT * FROM article_hits limit", "\"\"\"Example - ('<NAME>', Decimal('507594'))\"\"\" sql = \"\"\"SELECT authors.name, SUM( article_hits.hits ) FROM authors,", "error\"\"\" \"\"\"Ex. 
- ('July 17, 2016', Decimal('97.7'))\"\"\" sql = \"\"\"SELECT to_char(date, 'FMMonth FMDD,", ") FROM authors, articles, article_hits WHERE article_hits.article_title = articles.title AND authors.id = articles.author", "'''\" - ''' + str(count) + \"\"\" views\"\"\" \"\"\"Closes database connection\"\"\" db.close() def", "article_hits WHERE article_hits.article_title = articles.title AND authors.id = articles.author GROUP BY authors.name ORDER", "executed command & formats for report view\"\"\" print \"\"\"\\r\\nWho are the most popular", "three articles of all time?\\r\\n\"\"\" for row in c.fetchall(): title = row[0] count", "for row in c.fetchall(): author = row[0] count = row[1] print str(author) +", "\"\"\" - \"\"\" + str(count) + \"\"\" views\"\"\" \"\"\"Closes database connection\"\"\" db.close() def", "to the database\"\"\" c.execute(sql) \"\"\"Returns all info from executed command and formats for", "report of the most popular 3 articles of all time.\"\"\" get_article_counts(TOP_ARTICLES) \"\"\"Generate a", "the most popular article authors of all time?\\r\\n\"\"\" for row in c.fetchall(): author", "interaction with the database to perform operations\"\"\" c = db.cursor() \"\"\"Pull article titles", "< '99' ORDER BY date;\"\"\" \"\"\"Execute sends command to the database\"\"\" c.execute(sql) \"\"\"Returns", "FROM errors WHERE percent_good < '99' ORDER BY date;\"\"\" \"\"\"Execute sends command to", "& formats for report view\"\"\" print \"\"\"\\r\\nWho are the most popular article authors", "row[0] count = row[1] print str(author) + \"\"\" - \"\"\" + str(count) +", "date = row[0] error_percent = 100 - row[1] print str(date) + \"\"\" -", "report view\"\"\" print \"\"\"\\r\\nWho are the most popular article authors of all time?\\r\\n\"\"\"", "with the database to perform operations\"\"\" c = db.cursor() \"\"\"Pull days from errors", "\"\"\"Database article and errors report generator.\"\"\" import datetime import psycopg2 import bleach DBNAME", "print str(date) 
+ \"\"\" - \"\"\" + str(error_percent) + \"\"\"% errors\"\"\" \"\"\"Closes database", "and the sums of hits against all their articles from article_hits database view\"\"\"", "view\"\"\" print \"\"\"\\r\\nWho are the most popular article authors of all time?\\r\\n\"\"\" for", "are the most popular article authors of all time?\\r\\n\"\"\" for row in c.fetchall():", "of hits against all their articles from article_hits database view\"\"\" \"\"\"Example - ('<NAME>',", "#!/usr/bin/env python \"\"\"Database article and errors report generator.\"\"\" import datetime import psycopg2 import", "+ '''\" - ''' + str(count) + \"\"\" views\"\"\" \"\"\"Closes database connection\"\"\" db.close()", "= articles.title AND authors.id = articles.author GROUP BY authors.name ORDER BY sum desc;\"\"\"", "= row[1] print '''\"''' + str(title) + '''\" - ''' + str(count) +", "and errors report generator.\"\"\" import datetime import psycopg2 import bleach DBNAME = \"news\"", "all time.\"\"\" popular_authors() \"\"\"Generate a report of the day(s) with more than 1%", "str(author) + \"\"\" - \"\"\" + str(count) + \"\"\" views\"\"\" \"\"\"Closes database connection\"\"\"", "session and returns a new connection instance\"\"\" db = psycopg2.connect(database=DBNAME) \"\"\"Sets up interaction", "database view\"\"\" \"\"\"Example - ('Candidate is jerk, alleges rival', 338647L)\"\"\" sql = '''SELECT", "\"\"\"Generate a report of the day(s) with more than 1% errors requests.\"\"\" error_report()", "sql = \"\"\"SELECT authors.name, SUM( article_hits.hits ) FROM authors, articles, article_hits WHERE article_hits.article_title", "connection\"\"\" db.close() def popular_authors(): \"\"\"Return all posts from the 'database', most recent first.\"\"\"", "sums of hits against all their articles from article_hits database view\"\"\" \"\"\"Example -", "as day, percent_good FROM errors WHERE percent_good < '99' ORDER BY date;\"\"\" \"\"\"Execute", "error_percent = 100 - row[1] print str(date) + \"\"\" - \"\"\" 
+ str(error_percent)", "+ \"\"\"% errors\"\"\" \"\"\"Closes database connection\"\"\" db.close() def report(): \"\"\"Generate a report of", "- row[1] print str(date) + \"\"\" - \"\"\" + str(error_percent) + \"\"\"% errors\"\"\"", "db.close() def report(): \"\"\"Generate a report of the most popular 3 articles of", "\"\"\" views\"\"\" \"\"\"Closes database connection\"\"\" db.close() def error_report(): \"\"\"Return all posts from the", "against all their articles from article_hits database view\"\"\" \"\"\"Example - ('<NAME>', Decimal('507594'))\"\"\" sql", "formats for report view\"\"\" print \"\"\"\\r\\nWho are the most popular article authors of", "'''\"''' + str(title) + '''\" - ''' + str(count) + \"\"\" views\"\"\" \"\"\"Closes", "for report viewing\"\"\" print \"\\r\\nOn which days did more than 1% of requests", "ORDER BY date;\"\"\" \"\"\"Execute sends command to the database\"\"\" c.execute(sql) \"\"\"Returns all info", "day, percent_good FROM errors WHERE percent_good < '99' ORDER BY date;\"\"\" \"\"\"Execute sends", "information from executed command\"\"\" print \"\"\"\\r\\nWhat are the most popular three articles of", "1% error\"\"\" \"\"\"Ex. - ('July 17, 2016', Decimal('97.7'))\"\"\" sql = \"\"\"SELECT to_char(date, 'FMMonth", "info from executed command & formats for report view\"\"\" print \"\"\"\\r\\nWho are the", "row in c.fetchall(): title = row[0] count = row[1] print '''\"''' + str(title)", "and formats for report viewing\"\"\" print \"\\r\\nOn which days did more than 1%", "returns a new connection instance\"\"\" db = psycopg2.connect(database=DBNAME) \"\"\"Sets up interaction with the", "(%s);''' data = (bleach.clean(parameters),) \"\"\"Execute sends command to the database\"\"\" c.execute(sql, data) \"\"\"Returns", "the database\"\"\" c.execute(sql) \"\"\"Return all info from executed command & formats for report", "did more than 1% of requests lead to errors?\\r\\n\" for row in c.fetchall():", "more than 1% error\"\"\" \"\"\"Ex. 
- ('July 17, 2016', Decimal('97.7'))\"\"\" sql = \"\"\"SELECT", "Decimal('97.7'))\"\"\" sql = \"\"\"SELECT to_char(date, 'FMMonth FMDD, FMYYYY') as day, percent_good FROM errors", "sends command to the database\"\"\" c.execute(sql) \"\"\"Returns all info from executed command and", "the most popular three articles of all time?\\r\\n\"\"\" for row in c.fetchall(): title", "+ \"\"\" views\"\"\" \"\"\"Closes database connection\"\"\" db.close() def popular_authors(): \"\"\"Return all posts from", "up interaction with the database to perform operations\"\"\" c = db.cursor() \"\"\"Pull days", "3 articles of all time.\"\"\" get_article_counts(TOP_ARTICLES) \"\"\"Generate a report of the most popular", "17, 2016', Decimal('97.7'))\"\"\" sql = \"\"\"SELECT to_char(date, 'FMMonth FMDD, FMYYYY') as day, percent_good", "def report(): \"\"\"Generate a report of the most popular 3 articles of all", "\"\"\"SELECT authors.name, SUM( article_hits.hits ) FROM authors, articles, article_hits WHERE article_hits.article_title = articles.title", "connection instance\"\"\" db = psycopg2.connect(database=DBNAME) \"\"\"Sets up interaction with the database to perform", "limit (%s);''' data = (bleach.clean(parameters),) \"\"\"Execute sends command to the database\"\"\" c.execute(sql, data)", "article and errors report generator.\"\"\" import datetime import psycopg2 import bleach DBNAME =", "import bleach DBNAME = \"news\" TOP_ARTICLES = \"3\" def get_article_counts(parameters): \"\"\"Return all posts", "\"\"\"\\r\\nWho are the most popular article authors of all time?\\r\\n\"\"\" for row in", "article_hits.hits ) FROM authors, articles, article_hits WHERE article_hits.article_title = articles.title AND authors.id =", "str(count) + \"\"\" views\"\"\" \"\"\"Closes database connection\"\"\" db.close() def error_report(): \"\"\"Return all posts", "data) \"\"\"Returns all information from executed command\"\"\" print \"\"\"\\r\\nWhat are the most popular", "FMYYYY') as day, percent_good FROM 
errors WHERE percent_good < '99' ORDER BY date;\"\"\"", "report(): \"\"\"Generate a report of the most popular 3 articles of all time.\"\"\"", "all posts from the 'database', most recent first.\"\"\" \"\"\"Open PostGRES database session and", "a new connection instance\"\"\" db = psycopg2.connect(database=DBNAME) \"\"\"Sets up interaction with the database", "database\"\"\" c.execute(sql) \"\"\"Returns all info from executed command and formats for report viewing\"\"\"", "\"\"\"Execute sends command to the database\"\"\" c.execute(sql) \"\"\"Return all info from executed command", "database\"\"\" c.execute(sql, data) \"\"\"Returns all information from executed command\"\"\" print \"\"\"\\r\\nWhat are the", "database to perform operations\"\"\" c = db.cursor() \"\"\"Pull correlated data of authors and", "\"\"\"Open PostGRES database session and returns a new connection instance\"\"\" db = psycopg2.connect(database=DBNAME)", "for report view\"\"\" print \"\"\"\\r\\nWho are the most popular article authors of all", "in c.fetchall(): date = row[0] error_percent = 100 - row[1] print str(date) +", "all time?\\r\\n\"\"\" for row in c.fetchall(): author = row[0] count = row[1] print", "viewing\"\"\" print \"\\r\\nOn which days did more than 1% of requests lead to", "of all time?\\r\\n\"\"\" for row in c.fetchall(): title = row[0] count = row[1]", "executed command\"\"\" print \"\"\"\\r\\nWhat are the most popular three articles of all time?\\r\\n\"\"\"", "\"\"\"Return all info from executed command & formats for report view\"\"\" print \"\"\"\\r\\nWho", "new connection instance\"\"\" db = psycopg2.connect(database=DBNAME) \"\"\"Sets up interaction with the database to", "\"\"\"Closes database connection\"\"\" db.close() def report(): \"\"\"Generate a report of the most popular", "\"\"\"Closes database connection\"\"\" db.close() def error_report(): \"\"\"Return all posts from the 'database', most", "perform operations\"\"\" c = db.cursor() \"\"\"Pull days from errors view 
of database with", "correlated data of authors and the sums of hits against all their articles", "str(count) + \"\"\" views\"\"\" \"\"\"Closes database connection\"\"\" db.close() def popular_authors(): \"\"\"Return all posts", "from executed command & formats for report view\"\"\" print \"\"\"\\r\\nWho are the most", "articles.title AND authors.id = articles.author GROUP BY authors.name ORDER BY sum desc;\"\"\" \"\"\"Execute", "c.execute(sql) \"\"\"Return all info from executed command & formats for report view\"\"\" print", "most popular authors of all time.\"\"\" popular_authors() \"\"\"Generate a report of the day(s)", "''' + str(count) + \"\"\" views\"\"\" \"\"\"Closes database connection\"\"\" db.close() def popular_authors(): \"\"\"Return", "error_report(): \"\"\"Return all posts from the 'database', most recent first.\"\"\" \"\"\"Open PostGRES database", "database connection\"\"\" db.close() def popular_authors(): \"\"\"Return all posts from the 'database', most recent", "= \"\"\"SELECT to_char(date, 'FMMonth FMDD, FMYYYY') as day, percent_good FROM errors WHERE percent_good", "view\"\"\" \"\"\"Example - ('Candidate is jerk, alleges rival', 338647L)\"\"\" sql = '''SELECT *", "more than 1% of requests lead to errors?\\r\\n\" for row in c.fetchall(): date", "('<NAME>', Decimal('507594'))\"\"\" sql = \"\"\"SELECT authors.name, SUM( article_hits.hits ) FROM authors, articles, article_hits", "most popular 3 articles of all time.\"\"\" get_article_counts(TOP_ARTICLES) \"\"\"Generate a report of the", "views\"\"\" \"\"\"Closes database connection\"\"\" db.close() def popular_authors(): \"\"\"Return all posts from the 'database',", "errors?\\r\\n\" for row in c.fetchall(): date = row[0] error_percent = 100 - row[1]", "lead to errors?\\r\\n\" for row in c.fetchall(): date = row[0] error_percent = 100", "alleges rival', 338647L)\"\"\" sql = '''SELECT * FROM article_hits limit (%s);''' data =", "article_hits.article_title = articles.title AND authors.id = 
articles.author GROUP BY authors.name ORDER BY sum", "up interaction with the database to perform operations\"\"\" c = db.cursor() \"\"\"Pull article", "in c.fetchall(): title = row[0] count = row[1] print '''\"''' + str(title) +", "counts from article_hits database view\"\"\" \"\"\"Example - ('Candidate is jerk, alleges rival', 338647L)\"\"\"", "= row[0] error_percent = 100 - row[1] print str(date) + \"\"\" - \"\"\"", "Decimal('507594'))\"\"\" sql = \"\"\"SELECT authors.name, SUM( article_hits.hits ) FROM authors, articles, article_hits WHERE", "= db.cursor() \"\"\"Pull days from errors view of database with more than 1%", "db.cursor() \"\"\"Pull days from errors view of database with more than 1% error\"\"\"", "of all time.\"\"\" get_article_counts(TOP_ARTICLES) \"\"\"Generate a report of the most popular authors of", "requests lead to errors?\\r\\n\" for row in c.fetchall(): date = row[0] error_percent =", "to perform operations\"\"\" c = db.cursor() \"\"\"Pull article titles and hit counts from", "a report of the most popular 3 articles of all time.\"\"\" get_article_counts(TOP_ARTICLES) \"\"\"Generate", "c.execute(sql, data) \"\"\"Returns all information from executed command\"\"\" print \"\"\"\\r\\nWhat are the most", "\"\\r\\nOn which days did more than 1% of requests lead to errors?\\r\\n\" for", "the database to perform operations\"\"\" c = db.cursor() \"\"\"Pull article titles and hit", "\"3\" def get_article_counts(parameters): \"\"\"Return all posts from the 'database', most recent first.\"\"\" \"\"\"Open", "= psycopg2.connect(database=DBNAME) \"\"\"Sets up interaction with the database to perform operations\"\"\" c =", "def error_report(): \"\"\"Return all posts from the 'database', most recent first.\"\"\" \"\"\"Open PostGRES", "authors.id = articles.author GROUP BY authors.name ORDER BY sum desc;\"\"\" \"\"\"Execute sends command", "print \"\"\"\\r\\nWhat are the most popular three articles of all time?\\r\\n\"\"\" for row", "errors\"\"\" 
\"\"\"Closes database connection\"\"\" db.close() def report(): \"\"\"Generate a report of the most", "= row[0] count = row[1] print str(author) + \"\"\" - \"\"\" + str(count)", "report generator.\"\"\" import datetime import psycopg2 import bleach DBNAME = \"news\" TOP_ARTICLES =", "article_hits limit (%s);''' data = (bleach.clean(parameters),) \"\"\"Execute sends command to the database\"\"\" c.execute(sql,", "first.\"\"\" \"\"\"Open PostGRES database session and returns a new connection instance\"\"\" db =", "and returns a new connection instance\"\"\" db = psycopg2.connect(database=DBNAME) \"\"\"Sets up interaction with", "interaction with the database to perform operations\"\"\" c = db.cursor() \"\"\"Pull days from", "the most popular authors of all time.\"\"\" popular_authors() \"\"\"Generate a report of the", "database\"\"\" c.execute(sql) \"\"\"Return all info from executed command & formats for report view\"\"\"", "report viewing\"\"\" print \"\\r\\nOn which days did more than 1% of requests lead", "from article_hits database view\"\"\" \"\"\"Example - ('Candidate is jerk, alleges rival', 338647L)\"\"\" sql", "authors of all time?\\r\\n\"\"\" for row in c.fetchall(): author = row[0] count =", "= \"\"\"SELECT authors.name, SUM( article_hits.hits ) FROM authors, articles, article_hits WHERE article_hits.article_title =", "to the database\"\"\" c.execute(sql) \"\"\"Return all info from executed command & formats for", "\"\"\"Closes database connection\"\"\" db.close() def popular_authors(): \"\"\"Return all posts from the 'database', most", "\"\"\"Example - ('Candidate is jerk, alleges rival', 338647L)\"\"\" sql = '''SELECT * FROM", "with more than 1% error\"\"\" \"\"\"Ex. 
- ('July 17, 2016', Decimal('97.7'))\"\"\" sql =", "def popular_authors(): \"\"\"Return all posts from the 'database', most recent first.\"\"\" \"\"\"Open PostGRES", "interaction with the database to perform operations\"\"\" c = db.cursor() \"\"\"Pull correlated data", "\"\"\"Returns all info from executed command and formats for report viewing\"\"\" print \"\\r\\nOn", "psycopg2 import bleach DBNAME = \"news\" TOP_ARTICLES = \"3\" def get_article_counts(parameters): \"\"\"Return all", "sum desc;\"\"\" \"\"\"Execute sends command to the database\"\"\" c.execute(sql) \"\"\"Return all info from", "db.cursor() \"\"\"Pull article titles and hit counts from article_hits database view\"\"\" \"\"\"Example -", "hits against all their articles from article_hits database view\"\"\" \"\"\"Example - ('<NAME>', Decimal('507594'))\"\"\"", "database connection\"\"\" db.close() def report(): \"\"\"Generate a report of the most popular 3", "from article_hits database view\"\"\" \"\"\"Example - ('<NAME>', Decimal('507594'))\"\"\" sql = \"\"\"SELECT authors.name, SUM(", "print str(author) + \"\"\" - \"\"\" + str(count) + \"\"\" views\"\"\" \"\"\"Closes database", "sends command to the database\"\"\" c.execute(sql) \"\"\"Return all info from executed command &", "\"news\" TOP_ARTICLES = \"3\" def get_article_counts(parameters): \"\"\"Return all posts from the 'database', most", "row in c.fetchall(): author = row[0] count = row[1] print str(author) + \"\"\"", "report of the most popular authors of all time.\"\"\" popular_authors() \"\"\"Generate a report", "to perform operations\"\"\" c = db.cursor() \"\"\"Pull correlated data of authors and the", "ORDER BY sum desc;\"\"\" \"\"\"Execute sends command to the database\"\"\" c.execute(sql) \"\"\"Return all", "executed command and formats for report viewing\"\"\" print \"\\r\\nOn which days did more", "than 1% of requests lead to errors?\\r\\n\" for row in c.fetchall(): date =", "+ \"\"\" - \"\"\" + str(error_percent) + \"\"\"% errors\"\"\" 
\"\"\"Closes database connection\"\"\" db.close()", "the database\"\"\" c.execute(sql, data) \"\"\"Returns all information from executed command\"\"\" print \"\"\"\\r\\nWhat are", "print \"\"\"\\r\\nWho are the most popular article authors of all time?\\r\\n\"\"\" for row", "'99' ORDER BY date;\"\"\" \"\"\"Execute sends command to the database\"\"\" c.execute(sql) \"\"\"Returns all", "to_char(date, 'FMMonth FMDD, FMYYYY') as day, percent_good FROM errors WHERE percent_good < '99'", "database with more than 1% error\"\"\" \"\"\"Ex. - ('July 17, 2016', Decimal('97.7'))\"\"\" sql", "of database with more than 1% error\"\"\" \"\"\"Ex. - ('July 17, 2016', Decimal('97.7'))\"\"\"", "GROUP BY authors.name ORDER BY sum desc;\"\"\" \"\"\"Execute sends command to the database\"\"\"", "authors and the sums of hits against all their articles from article_hits database", "the 'database', most recent first.\"\"\" \"\"\"Open PostGRES database session and returns a new", "database view\"\"\" \"\"\"Example - ('<NAME>', Decimal('507594'))\"\"\" sql = \"\"\"SELECT authors.name, SUM( article_hits.hits )", "\"\"\"Pull article titles and hit counts from article_hits database view\"\"\" \"\"\"Example - ('Candidate", "errors view of database with more than 1% error\"\"\" \"\"\"Ex. - ('July 17,", "\"\"\"Ex. 
- ('July 17, 2016', Decimal('97.7'))\"\"\" sql = \"\"\"SELECT to_char(date, 'FMMonth FMDD, FMYYYY')", "from the 'database', most recent first.\"\"\" \"\"\"Open PostGRES database session and returns a", "- ''' + str(count) + \"\"\" views\"\"\" \"\"\"Closes database connection\"\"\" db.close() def popular_authors():", "str(date) + \"\"\" - \"\"\" + str(error_percent) + \"\"\"% errors\"\"\" \"\"\"Closes database connection\"\"\"", "the database\"\"\" c.execute(sql) \"\"\"Returns all info from executed command and formats for report", "= db.cursor() \"\"\"Pull correlated data of authors and the sums of hits against", "hit counts from article_hits database view\"\"\" \"\"\"Example - ('Candidate is jerk, alleges rival',", "c.execute(sql) \"\"\"Returns all info from executed command and formats for report viewing\"\"\" print", "\"\"\"Generate a report of the most popular authors of all time.\"\"\" popular_authors() \"\"\"Generate", "command and formats for report viewing\"\"\" print \"\\r\\nOn which days did more than", "authors.name ORDER BY sum desc;\"\"\" \"\"\"Execute sends command to the database\"\"\" c.execute(sql) \"\"\"Return", "\"\"\"Sets up interaction with the database to perform operations\"\"\" c = db.cursor() \"\"\"Pull", "db.cursor() \"\"\"Pull correlated data of authors and the sums of hits against all", "time.\"\"\" popular_authors() \"\"\"Generate a report of the day(s) with more than 1% errors", "= 100 - row[1] print str(date) + \"\"\" - \"\"\" + str(error_percent) +" ]
[ "program import control SOCK_PORT = 4242 HTTP_PORT = 80 robotContoler = control.control(HTTP_PORT, SOCK_PORT)", "#Kent State Univeristy - RMC team #<NAME> 2018 # #Starts program import control", "Univeristy - RMC team #<NAME> 2018 # #Starts program import control SOCK_PORT =", "RMC team #<NAME> 2018 # #Starts program import control SOCK_PORT = 4242 HTTP_PORT", "State Univeristy - RMC team #<NAME> 2018 # #Starts program import control SOCK_PORT", "- RMC team #<NAME> 2018 # #Starts program import control SOCK_PORT = 4242", "#Starts program import control SOCK_PORT = 4242 HTTP_PORT = 80 robotContoler = control.control(HTTP_PORT,", "<filename>control/init.py #Kent State Univeristy - RMC team #<NAME> 2018 # #Starts program import", "team #<NAME> 2018 # #Starts program import control SOCK_PORT = 4242 HTTP_PORT =", "# #Starts program import control SOCK_PORT = 4242 HTTP_PORT = 80 robotContoler =", "2018 # #Starts program import control SOCK_PORT = 4242 HTTP_PORT = 80 robotContoler", "#<NAME> 2018 # #Starts program import control SOCK_PORT = 4242 HTTP_PORT = 80" ]
[ "X, freq_weights = weight, family = bin_family) my_glm_fit = my_glm.fit() theta = my_glm_fit.params", "as sm from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod.families import Binomial from statsmodels.genmod.families.links import", "datetime.datetime.now() cur.execute(\"INSERT INTO scores VALUES (%s, %s, %s, %s, %s)\", (str(current_dt), theta[0], theta[1],", "pd #import statsmodels.api as sm from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod.families import Binomial", "= GLM(y, X, freq_weights = weight, family = bin_family) my_glm_fit = my_glm.fit() theta", "as np import pandas as pd #import statsmodels.api as sm from statsmodels.genmod.generalized_linear_model import", "perform db ops cur.execute(\"SELECT * FROM Iris;\") iris_df = pd.DataFrame(cur.fetchall()) X = np.array(iris_df.iloc[:,", "psycopg2.connect(DATABASE_URL, sslmode='require') cur = conn.cursor() # cursor needed to perform db ops cur.execute(\"SELECT", "(%s, %s, %s, %s, %s)\", (str(current_dt), theta[0], theta[1], theta[2], theta[3])) conn.commit() cur.close() conn.close()", "Iris;\") iris_df = pd.DataFrame(cur.fetchall()) X = np.array(iris_df.iloc[:, 0:4]) y = np.array(iris_df.iloc[:, 4] ==", "probit DATABASE_URL = os.environ['DATABASE_URL'] conn = psycopg2.connect(DATABASE_URL, sslmode='require') cur = conn.cursor() # cursor", "numpy as np import pandas as pd #import statsmodels.api as sm from statsmodels.genmod.generalized_linear_model", "VALUES (%s, %s, %s, %s, %s)\", (str(current_dt), theta[0], theta[1], theta[2], theta[3])) conn.commit() cur.close()", "os import datetime import psycopg2 import numpy as np import pandas as pd", "sslmode='require') cur = conn.cursor() # cursor needed to perform db ops cur.execute(\"SELECT *", "pd.DataFrame(cur.fetchall()) X = np.array(iris_df.iloc[:, 0:4]) y = np.array(iris_df.iloc[:, 4] == 'virginica', dtype=int) weight", "'virginica', dtype=int) weight = np.ones(150) probit_link = probit() 
bin_family = Binomial(probit_link) my_glm =", "weight, family = bin_family) my_glm_fit = my_glm.fit() theta = my_glm_fit.params current_dt = datetime.datetime.now()", "= datetime.datetime.now() cur.execute(\"INSERT INTO scores VALUES (%s, %s, %s, %s, %s)\", (str(current_dt), theta[0],", "= probit() bin_family = Binomial(probit_link) my_glm = GLM(y, X, freq_weights = weight, family", "= pd.DataFrame(cur.fetchall()) X = np.array(iris_df.iloc[:, 0:4]) y = np.array(iris_df.iloc[:, 4] == 'virginica', dtype=int)", "= my_glm.fit() theta = my_glm_fit.params current_dt = datetime.datetime.now() cur.execute(\"INSERT INTO scores VALUES (%s,", "GLM from statsmodels.genmod.families import Binomial from statsmodels.genmod.families.links import probit DATABASE_URL = os.environ['DATABASE_URL'] conn", "cursor needed to perform db ops cur.execute(\"SELECT * FROM Iris;\") iris_df = pd.DataFrame(cur.fetchall())", "current_dt = datetime.datetime.now() cur.execute(\"INSERT INTO scores VALUES (%s, %s, %s, %s, %s)\", (str(current_dt),", "from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod.families import Binomial from statsmodels.genmod.families.links import probit DATABASE_URL", "statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod.families import Binomial from statsmodels.genmod.families.links import probit DATABASE_URL =", "statsmodels.genmod.families.links import probit DATABASE_URL = os.environ['DATABASE_URL'] conn = psycopg2.connect(DATABASE_URL, sslmode='require') cur = conn.cursor()", "my_glm = GLM(y, X, freq_weights = weight, family = bin_family) my_glm_fit = my_glm.fit()", "db ops cur.execute(\"SELECT * FROM Iris;\") iris_df = pd.DataFrame(cur.fetchall()) X = np.array(iris_df.iloc[:, 0:4])", "statsmodels.genmod.families import Binomial from statsmodels.genmod.families.links import probit DATABASE_URL = os.environ['DATABASE_URL'] conn = psycopg2.connect(DATABASE_URL,", "np import pandas as pd #import statsmodels.api as sm 
from statsmodels.genmod.generalized_linear_model import GLM", "= Binomial(probit_link) my_glm = GLM(y, X, freq_weights = weight, family = bin_family) my_glm_fit", "= os.environ['DATABASE_URL'] conn = psycopg2.connect(DATABASE_URL, sslmode='require') cur = conn.cursor() # cursor needed to", "bin_family) my_glm_fit = my_glm.fit() theta = my_glm_fit.params current_dt = datetime.datetime.now() cur.execute(\"INSERT INTO scores", "Binomial(probit_link) my_glm = GLM(y, X, freq_weights = weight, family = bin_family) my_glm_fit =", "as pd #import statsmodels.api as sm from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod.families import", "cur = conn.cursor() # cursor needed to perform db ops cur.execute(\"SELECT * FROM", "np.array(iris_df.iloc[:, 0:4]) y = np.array(iris_df.iloc[:, 4] == 'virginica', dtype=int) weight = np.ones(150) probit_link", "conn.cursor() # cursor needed to perform db ops cur.execute(\"SELECT * FROM Iris;\") iris_df", "sm from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod.families import Binomial from statsmodels.genmod.families.links import probit", "= np.array(iris_df.iloc[:, 4] == 'virginica', dtype=int) weight = np.ones(150) probit_link = probit() bin_family", "freq_weights = weight, family = bin_family) my_glm_fit = my_glm.fit() theta = my_glm_fit.params current_dt", "X = np.array(iris_df.iloc[:, 0:4]) y = np.array(iris_df.iloc[:, 4] == 'virginica', dtype=int) weight =", "= np.array(iris_df.iloc[:, 0:4]) y = np.array(iris_df.iloc[:, 4] == 'virginica', dtype=int) weight = np.ones(150)", "theta = my_glm_fit.params current_dt = datetime.datetime.now() cur.execute(\"INSERT INTO scores VALUES (%s, %s, %s,", "# cursor needed to perform db ops cur.execute(\"SELECT * FROM Iris;\") iris_df =", "pandas as pd #import statsmodels.api as sm from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod.families", "= conn.cursor() # cursor needed to perform db ops 
cur.execute(\"SELECT * FROM Iris;\")", "os.environ['DATABASE_URL'] conn = psycopg2.connect(DATABASE_URL, sslmode='require') cur = conn.cursor() # cursor needed to perform", "== 'virginica', dtype=int) weight = np.ones(150) probit_link = probit() bin_family = Binomial(probit_link) my_glm", "probit() bin_family = Binomial(probit_link) my_glm = GLM(y, X, freq_weights = weight, family =", "DATABASE_URL = os.environ['DATABASE_URL'] conn = psycopg2.connect(DATABASE_URL, sslmode='require') cur = conn.cursor() # cursor needed", "my_glm_fit.params current_dt = datetime.datetime.now() cur.execute(\"INSERT INTO scores VALUES (%s, %s, %s, %s, %s)\",", "y = np.array(iris_df.iloc[:, 4] == 'virginica', dtype=int) weight = np.ones(150) probit_link = probit()", "= weight, family = bin_family) my_glm_fit = my_glm.fit() theta = my_glm_fit.params current_dt =", "import probit DATABASE_URL = os.environ['DATABASE_URL'] conn = psycopg2.connect(DATABASE_URL, sslmode='require') cur = conn.cursor() #", "* FROM Iris;\") iris_df = pd.DataFrame(cur.fetchall()) X = np.array(iris_df.iloc[:, 0:4]) y = np.array(iris_df.iloc[:,", "iris_df = pd.DataFrame(cur.fetchall()) X = np.array(iris_df.iloc[:, 0:4]) y = np.array(iris_df.iloc[:, 4] == 'virginica',", "import numpy as np import pandas as pd #import statsmodels.api as sm from", "import datetime import psycopg2 import numpy as np import pandas as pd #import", "= psycopg2.connect(DATABASE_URL, sslmode='require') cur = conn.cursor() # cursor needed to perform db ops", "FROM Iris;\") iris_df = pd.DataFrame(cur.fetchall()) X = np.array(iris_df.iloc[:, 0:4]) y = np.array(iris_df.iloc[:, 4]", "import psycopg2 import numpy as np import pandas as pd #import statsmodels.api as", "cur.execute(\"SELECT * FROM Iris;\") iris_df = pd.DataFrame(cur.fetchall()) X = np.array(iris_df.iloc[:, 0:4]) y =", "GLM(y, X, freq_weights = weight, family = bin_family) my_glm_fit = my_glm.fit() theta =", "ops cur.execute(\"SELECT * FROM Iris;\") iris_df = 
pd.DataFrame(cur.fetchall()) X = np.array(iris_df.iloc[:, 0:4]) y", "needed to perform db ops cur.execute(\"SELECT * FROM Iris;\") iris_df = pd.DataFrame(cur.fetchall()) X", "import GLM from statsmodels.genmod.families import Binomial from statsmodels.genmod.families.links import probit DATABASE_URL = os.environ['DATABASE_URL']", "4] == 'virginica', dtype=int) weight = np.ones(150) probit_link = probit() bin_family = Binomial(probit_link)", "#import statsmodels.api as sm from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod.families import Binomial from", "np.array(iris_df.iloc[:, 4] == 'virginica', dtype=int) weight = np.ones(150) probit_link = probit() bin_family =", "import pandas as pd #import statsmodels.api as sm from statsmodels.genmod.generalized_linear_model import GLM from", "= my_glm_fit.params current_dt = datetime.datetime.now() cur.execute(\"INSERT INTO scores VALUES (%s, %s, %s, %s,", "to perform db ops cur.execute(\"SELECT * FROM Iris;\") iris_df = pd.DataFrame(cur.fetchall()) X =", "from statsmodels.genmod.families.links import probit DATABASE_URL = os.environ['DATABASE_URL'] conn = psycopg2.connect(DATABASE_URL, sslmode='require') cur =", "from statsmodels.genmod.families import Binomial from statsmodels.genmod.families.links import probit DATABASE_URL = os.environ['DATABASE_URL'] conn =", "= np.ones(150) probit_link = probit() bin_family = Binomial(probit_link) my_glm = GLM(y, X, freq_weights", "scores VALUES (%s, %s, %s, %s, %s)\", (str(current_dt), theta[0], theta[1], theta[2], theta[3])) conn.commit()", "0:4]) y = np.array(iris_df.iloc[:, 4] == 'virginica', dtype=int) weight = np.ones(150) probit_link =", "= bin_family) my_glm_fit = my_glm.fit() theta = my_glm_fit.params current_dt = datetime.datetime.now() cur.execute(\"INSERT INTO", "psycopg2 import numpy as np import pandas as pd #import statsmodels.api as sm", "import Binomial from statsmodels.genmod.families.links import probit DATABASE_URL = 
os.environ['DATABASE_URL'] conn = psycopg2.connect(DATABASE_URL, sslmode='require')", "cur.execute(\"INSERT INTO scores VALUES (%s, %s, %s, %s, %s)\", (str(current_dt), theta[0], theta[1], theta[2],", "datetime import psycopg2 import numpy as np import pandas as pd #import statsmodels.api", "probit_link = probit() bin_family = Binomial(probit_link) my_glm = GLM(y, X, freq_weights = weight,", "my_glm_fit = my_glm.fit() theta = my_glm_fit.params current_dt = datetime.datetime.now() cur.execute(\"INSERT INTO scores VALUES", "conn = psycopg2.connect(DATABASE_URL, sslmode='require') cur = conn.cursor() # cursor needed to perform db", "import os import datetime import psycopg2 import numpy as np import pandas as", "np.ones(150) probit_link = probit() bin_family = Binomial(probit_link) my_glm = GLM(y, X, freq_weights =", "my_glm.fit() theta = my_glm_fit.params current_dt = datetime.datetime.now() cur.execute(\"INSERT INTO scores VALUES (%s, %s,", "dtype=int) weight = np.ones(150) probit_link = probit() bin_family = Binomial(probit_link) my_glm = GLM(y,", "bin_family = Binomial(probit_link) my_glm = GLM(y, X, freq_weights = weight, family = bin_family)", "statsmodels.api as sm from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod.families import Binomial from statsmodels.genmod.families.links", "weight = np.ones(150) probit_link = probit() bin_family = Binomial(probit_link) my_glm = GLM(y, X,", "Binomial from statsmodels.genmod.families.links import probit DATABASE_URL = os.environ['DATABASE_URL'] conn = psycopg2.connect(DATABASE_URL, sslmode='require') cur", "family = bin_family) my_glm_fit = my_glm.fit() theta = my_glm_fit.params current_dt = datetime.datetime.now() cur.execute(\"INSERT", "INTO scores VALUES (%s, %s, %s, %s, %s)\", (str(current_dt), theta[0], theta[1], theta[2], theta[3]))" ]
[ "* sqrt39) / 54880 C = (297 - 47 * sqrt39) / 32928", "points, 7, citation) def stroud_secrest_11(): sqrt5 = sqrt(5) sqrt39 = sqrt(39) sqrt195 =", "sqrt195) for p_m in [+1, -1] ] eta = sqrt(36 + 4 *", "* sqrt130) / 11) xi = sqrt(288 + 24 * sqrt130) eta =", "[ (A, numpy.array([[0, 0, 0]])), (B, pm_roll(3, [xi, nu])), (C, pm(3, eta)), (C,", "[ (A, numpy.array([[0, 0, 0]])), (B, fsd(3, (nu, 1))), (C, fsd(3, (xi, 2))),", "citation = article( authors=[\"<NAME>\", \"<NAME>\"], title=\"Approximate integration formulas for certain spherically symmetric regions\",", "- 26 * sqrt39) / 2940 B = (1065 + 171 * sqrt39)", "= frac(3, 100) data = [(A, numpy.array([[0, 0, 0]])), (B, fsd(3, (nu, 1))),", "(B, pm_roll(3, [xi, nu]))] points, weights = untangle(data) weights *= 8 * pi", "E3rScheme(\"Stroud-Secrest X\", weights, points, 7, citation) def stroud_secrest_11(): sqrt5 = sqrt(5) sqrt39 =", "[sqrt(15 - p_m * 3 * sqrt(5)) for p_m in [+1, -1]] A", "[sqrt(15 - p_m * 5 * sqrt(5)) for p_m in [+1, -1]] A", "= sqrt(36 + 4 * sqrt39) mu, lmbda = [ sqrt(54 + p_m", "A = (5175 - 13 * sqrt130) / 8820 B = (3870 +", "2 * sqrt195) for p_m in [+1, -1] ] eta = sqrt(36 +", "eta = sqrt(10) A = frac(3, 5) B = frac(2, 75) C =", "(3870 + 283 * sqrt130) / 493920 C = (3204 - 281 *", "Stroud's book: 917568 vs. 
197568 D = (4239 + 373 * sqrt130) /", "10 * sqrt39 - p_m * 2 * sqrt195) for p_m in [+1,", "5, citation) def stroud_secrest_08(): nu = sqrt(30) eta = sqrt(10) A = frac(3,", "def stroud_secrest_09(): eta = sqrt(10) xi, nu = [sqrt(15 - p_m * 5", "B = (3870 + 283 * sqrt130) / 493920 C = (3204 -", "in [+1, -1] ] eta = sqrt(36 + 4 * sqrt39) mu, lmbda", "mu, lmbda = [ sqrt(54 + p_m * 18 * sqrt5 + 6", "1))), (C, fsd(3, (xi, 2))), (D, pm(3, eta)), ] points, weights = untangle(data)", "[xi, nu])), (C, pm(3, eta)), (C, pm_roll(3, [lmbda, mu])), ] points, weights =", "frac(3, 5) B = frac(2, 75) C = frac(3, 100) data = [(A,", "] A = (1725 - 26 * sqrt39) / 2940 B = (1065", "weights *= 8 * pi return E3rScheme(\"Stroud-Secrest IX\", weights, points, 5, citation) def", "= sqrt(130) nu = sqrt((720 - 24 * sqrt130) / 11) xi =", "sqrt(130) nu = sqrt((720 - 24 * sqrt130) / 11) xi = sqrt(288", "30) data = [(A, numpy.array([[0, 0, 0]])), (B, pm_roll(3, [xi, nu]))] points, weights", "= frac(1, 50) data = [(A, numpy.array([[0, 0, 0]])), (B, pm(3, eta)), (B,", "weights = untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest IX\", weights, points,", "* sqrt195) for p_m in [+1, -1] ] A = (1725 - 26", "nu = sqrt((720 - 24 * sqrt130) / 11) xi = sqrt(288 +", "in Stroud's book: 917568 vs. 
197568 D = (4239 + 373 * sqrt130)", "5, citation) def stroud_secrest_10(): sqrt130 = sqrt(130) nu = sqrt((720 - 24 *", "from ..helpers import article, fsd, pm, pm_roll, untangle from ._helpers import E3rScheme citation", "p_m in [+1, -1] ] eta = sqrt(36 + 4 * sqrt39) mu,", "p_m * 18 * sqrt5 + 6 * sqrt39 + p_m * 2", "+ 24 * sqrt130) / 7) A = (5175 - 13 * sqrt130)", "= sqrt(195) nu, xi = [ sqrt(-50 + p_m * 10 * sqrt5", "(3204 - 281 * sqrt130) / 197568 # ERR in Stroud's book: 917568", "E3rScheme citation = article( authors=[\"<NAME>\", \"<NAME>\"], title=\"Approximate integration formulas for certain spherically symmetric", "p_m in [+1, -1]] A = frac(3, 5) B = frac(1, 50) data", "[(A, numpy.array([[0, 0, 0]])), (B, fsd(3, (nu, 1))), (C, pm(3, eta))] points, weights", "[(A, numpy.array([[0, 0, 0]])), (B, pm(3, eta)), (B, pm_roll(3, [xi, nu]))] points, weights", "sqrt130 = sqrt(130) nu = sqrt((720 - 24 * sqrt130) / 11) xi", "points, weights = untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest VIII\", weights,", "[lmbda, mu])), ] points, weights = untangle(data) weights *= 8 * pi return", "/ 11) xi = sqrt(288 + 24 * sqrt130) eta = sqrt((-216 +", "0]])), (B, pm_roll(3, [xi, nu])), (C, pm(3, eta)), (C, pm_roll(3, [lmbda, mu])), ]", "eta)), (C, pm_roll(3, [lmbda, mu])), ] points, weights = untangle(data) weights *= 8", "pm, pm_roll, untangle from ._helpers import E3rScheme citation = article( authors=[\"<NAME>\", \"<NAME>\"], title=\"Approximate", "formulas for certain spherically symmetric regions\", journal=\"Math. Comp.\", volume=\"17\", year=\"1963\", pages=\"105-135\", url=\"https://doi.org/10.1090/S0025-5718-1963-0161473-0\", )", "197568 data = [ (A, numpy.array([[0, 0, 0]])), (B, fsd(3, (nu, 1))), (C,", "= (3870 + 283 * sqrt130) / 493920 C = (3204 - 281", "p_m * 10 * sqrt5 + 10 * sqrt39 - p_m * 2", "= frac(1, 30) data = [(A, numpy.array([[0, 0, 0]])), (B, pm_roll(3, [xi, nu]))]", "ERR in Stroud's book: 917568 vs. 
197568 D = (4239 + 373 *", "(4239 + 373 * sqrt130) / 197568 data = [ (A, numpy.array([[0, 0,", "= untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest VIII\", weights, points, 5,", "+ 283 * sqrt130) / 493920 C = (3204 - 281 * sqrt130)", "+ p_m * 2 * sqrt195) for p_m in [+1, -1] ] A", "for p_m in [+1, -1] ] A = (1725 - 26 * sqrt39)", "data = [(A, numpy.array([[0, 0, 0]])), (B, fsd(3, (nu, 1))), (C, pm(3, eta))]", "sqrt(288 + 24 * sqrt130) eta = sqrt((-216 + 24 * sqrt130) /", "= [sqrt(15 - p_m * 5 * sqrt(5)) for p_m in [+1, -1]]", "= (297 - 47 * sqrt39) / 32928 data = [ (A, numpy.array([[0,", "nu, xi = [ sqrt(-50 + p_m * 10 * sqrt5 + 10", "sqrt((720 - 24 * sqrt130) / 11) xi = sqrt(288 + 24 *", "- 47 * sqrt39) / 32928 data = [ (A, numpy.array([[0, 0, 0]])),", "+ 4 * sqrt39) mu, lmbda = [ sqrt(54 + p_m * 18", "citation) def stroud_secrest_08(): nu = sqrt(30) eta = sqrt(10) A = frac(3, 5)", "Comp.\", volume=\"17\", year=\"1963\", pages=\"105-135\", url=\"https://doi.org/10.1090/S0025-5718-1963-0161473-0\", ) def stroud_secrest_07(): nu, xi = [sqrt(15 -", "- p_m * 3 * sqrt(5)) for p_m in [+1, -1]] A =", "p_m in [+1, -1]] A = frac(3, 5) B = frac(1, 30) data", "in [+1, -1] ] A = (1725 - 26 * sqrt39) / 2940", "def stroud_secrest_10(): sqrt130 = sqrt(130) nu = sqrt((720 - 24 * sqrt130) /", "= sqrt(10) A = frac(3, 5) B = frac(2, 75) C = frac(3,", "A = frac(3, 5) B = frac(1, 50) data = [(A, numpy.array([[0, 0,", "sqrt(36 + 4 * sqrt39) mu, lmbda = [ sqrt(54 + p_m *", "untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest XI\", weights, points, 7, citation)", "numpy from sympy import Rational as frac from sympy import pi, sqrt from", "*= 8 * pi return E3rScheme(\"Stroud-Secrest IX\", weights, points, 5, citation) def stroud_secrest_10():", "sympy import pi, sqrt from ..helpers import article, fsd, pm, pm_roll, untangle from", "nu = [sqrt(15 - p_m * 5 * sqrt(5)) for p_m in [+1,", "frac(3, 5) B = frac(1, 50) data = [(A, numpy.array([[0, 0, 0]])), (B,", 
"data = [(A, numpy.array([[0, 0, 0]])), (B, pm(3, eta)), (B, pm_roll(3, [xi, nu]))]", "sqrt195 = sqrt(195) nu, xi = [ sqrt(-50 + p_m * 10 *", "197568 D = (4239 + 373 * sqrt130) / 197568 data = [", "-1]] A = frac(3, 5) B = frac(1, 30) data = [(A, numpy.array([[0,", "* 5 * sqrt(5)) for p_m in [+1, -1]] A = frac(3, 5)", "B = frac(1, 50) data = [(A, numpy.array([[0, 0, 0]])), (B, pm(3, eta)),", "points, weights = untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest IX\", weights,", "for certain spherically symmetric regions\", journal=\"Math. Comp.\", volume=\"17\", year=\"1963\", pages=\"105-135\", url=\"https://doi.org/10.1090/S0025-5718-1963-0161473-0\", ) def", "year=\"1963\", pages=\"105-135\", url=\"https://doi.org/10.1090/S0025-5718-1963-0161473-0\", ) def stroud_secrest_07(): nu, xi = [sqrt(15 - p_m *", "= sqrt(39) sqrt195 = sqrt(195) nu, xi = [ sqrt(-50 + p_m *", "1))), (C, pm(3, eta))] points, weights = untangle(data) weights *= 8 * pi", "5 * sqrt(5)) for p_m in [+1, -1]] A = frac(3, 5) B", "A = frac(3, 5) B = frac(1, 30) data = [(A, numpy.array([[0, 0,", "- p_m * 2 * sqrt195) for p_m in [+1, -1] ] eta", "weights, points, 5, citation) def stroud_secrest_08(): nu = sqrt(30) eta = sqrt(10) A", "sqrt39 - p_m * 2 * sqrt195) for p_m in [+1, -1] ]", "8 * pi return E3rScheme(\"Stroud-Secrest VII\", weights, points, 5, citation) def stroud_secrest_08(): nu", "untangle from ._helpers import E3rScheme citation = article( authors=[\"<NAME>\", \"<NAME>\"], title=\"Approximate integration formulas", "eta = sqrt((-216 + 24 * sqrt130) / 7) A = (5175 -", "C = (3204 - 281 * sqrt130) / 197568 # ERR in Stroud's", "sqrt(-50 + p_m * 10 * sqrt5 + 10 * sqrt39 - p_m", "(B, fsd(3, (nu, 1))), (C, pm(3, eta))] points, weights = untangle(data) weights *=", "xi, nu = [sqrt(15 - p_m * 5 * sqrt(5)) for p_m in", "pi return E3rScheme(\"Stroud-Secrest IX\", weights, points, 5, citation) def stroud_secrest_10(): sqrt130 = sqrt(130)", "= [ sqrt(54 + p_m * 18 * sqrt5 + 6 * sqrt39", 
"return E3rScheme(\"Stroud-Secrest VII\", weights, points, 5, citation) def stroud_secrest_08(): nu = sqrt(30) eta", "917568 vs. 197568 D = (4239 + 373 * sqrt130) / 197568 data", "D = (4239 + 373 * sqrt130) / 197568 data = [ (A,", "weights = untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest X\", weights, points,", "(B, fsd(3, (nu, 1))), (C, fsd(3, (xi, 2))), (D, pm(3, eta)), ] points,", "* sqrt39 - p_m * 2 * sqrt195) for p_m in [+1, -1]", "nu = sqrt(30) eta = sqrt(10) A = frac(3, 5) B = frac(2,", "= untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest X\", weights, points, 7,", "nu, xi = [sqrt(15 - p_m * 3 * sqrt(5)) for p_m in", "sqrt39) mu, lmbda = [ sqrt(54 + p_m * 18 * sqrt5 +", "regions\", journal=\"Math. Comp.\", volume=\"17\", year=\"1963\", pages=\"105-135\", url=\"https://doi.org/10.1090/S0025-5718-1963-0161473-0\", ) def stroud_secrest_07(): nu, xi =", "untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest VII\", weights, points, 5, citation)", "281 * sqrt130) / 197568 # ERR in Stroud's book: 917568 vs. 197568", "sympy import Rational as frac from sympy import pi, sqrt from ..helpers import", "* sqrt130) / 493920 C = (3204 - 281 * sqrt130) / 197568", "p_m * 2 * sqrt195) for p_m in [+1, -1] ] eta =", "mu])), ] points, weights = untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest", "24 * sqrt130) / 11) xi = sqrt(288 + 24 * sqrt130) eta", "= [sqrt(15 - p_m * 3 * sqrt(5)) for p_m in [+1, -1]]", "vs. 
197568 D = (4239 + 373 * sqrt130) / 197568 data =", "0]])), (B, pm_roll(3, [xi, nu]))] points, weights = untangle(data) weights *= 8 *", "= (3204 - 281 * sqrt130) / 197568 # ERR in Stroud's book:", "weights *= 8 * pi return E3rScheme(\"Stroud-Secrest VII\", weights, points, 5, citation) def", "+ 171 * sqrt39) / 54880 C = (297 - 47 * sqrt39)", "for p_m in [+1, -1]] A = frac(3, 5) B = frac(1, 30)", "A = (1725 - 26 * sqrt39) / 2940 B = (1065 +", "= article( authors=[\"<NAME>\", \"<NAME>\"], title=\"Approximate integration formulas for certain spherically symmetric regions\", journal=\"Math.", "= (5175 - 13 * sqrt130) / 8820 B = (3870 + 283", "0, 0]])), (B, pm_roll(3, [xi, nu]))] points, weights = untangle(data) weights *= 8", "B = frac(2, 75) C = frac(3, 100) data = [(A, numpy.array([[0, 0,", "8 * pi return E3rScheme(\"Stroud-Secrest VIII\", weights, points, 5, citation) def stroud_secrest_09(): eta", "6 * sqrt39 + p_m * 2 * sqrt195) for p_m in [+1,", "sqrt(10) xi, nu = [sqrt(15 - p_m * 5 * sqrt(5)) for p_m", "= untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest VII\", weights, points, 5,", "[+1, -1] ] A = (1725 - 26 * sqrt39) / 2940 B", "0]])), (B, pm(3, eta)), (B, pm_roll(3, [xi, nu]))] points, weights = untangle(data) weights", "0, 0]])), (B, fsd(3, (nu, 1))), (C, pm(3, eta))] points, weights = untangle(data)", "weights = untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest VII\", weights, points,", "pm(3, eta)), (C, pm_roll(3, [lmbda, mu])), ] points, weights = untangle(data) weights *=", "p_m * 5 * sqrt(5)) for p_m in [+1, -1]] A = frac(3,", "return E3rScheme(\"Stroud-Secrest VIII\", weights, points, 5, citation) def stroud_secrest_09(): eta = sqrt(10) xi,", "= untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest XI\", weights, points, 7,", "*= 8 * pi return E3rScheme(\"Stroud-Secrest X\", weights, points, 7, citation) def stroud_secrest_11():", "eta))] points, weights = untangle(data) weights *= 8 * pi return 
E3rScheme(\"Stroud-Secrest VIII\",", "sqrt39) / 2940 B = (1065 + 171 * sqrt39) / 54880 C", "-1] ] A = (1725 - 26 * sqrt39) / 2940 B =", "* 18 * sqrt5 + 6 * sqrt39 + p_m * 2 *", "B = (1065 + 171 * sqrt39) / 54880 C = (297 -", "VIII\", weights, points, 5, citation) def stroud_secrest_09(): eta = sqrt(10) xi, nu =", "eta)), (B, pm_roll(3, [xi, nu]))] points, weights = untangle(data) weights *= 8 *", "* pi return E3rScheme(\"Stroud-Secrest X\", weights, points, 7, citation) def stroud_secrest_11(): sqrt5 =", "weights, points, 5, citation) def stroud_secrest_10(): sqrt130 = sqrt(130) nu = sqrt((720 -", "sqrt(5) sqrt39 = sqrt(39) sqrt195 = sqrt(195) nu, xi = [ sqrt(-50 +", "= untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest IX\", weights, points, 5,", "eta = sqrt(10) xi, nu = [sqrt(15 - p_m * 5 * sqrt(5))", "(A, numpy.array([[0, 0, 0]])), (B, pm_roll(3, [xi, nu])), (C, pm(3, eta)), (C, pm_roll(3,", "= (1065 + 171 * sqrt39) / 54880 C = (297 - 47", "* sqrt(5)) for p_m in [+1, -1]] A = frac(3, 5) B =", "frac(1, 50) data = [(A, numpy.array([[0, 0, 0]])), (B, pm(3, eta)), (B, pm_roll(3,", "numpy.array([[0, 0, 0]])), (B, pm(3, eta)), (B, pm_roll(3, [xi, nu]))] points, weights =", "= [ (A, numpy.array([[0, 0, 0]])), (B, fsd(3, (nu, 1))), (C, fsd(3, (xi,", "= sqrt((720 - 24 * sqrt130) / 11) xi = sqrt(288 + 24", "xi = [ sqrt(-50 + p_m * 10 * sqrt5 + 10 *", "sqrt130) / 493920 C = (3204 - 281 * sqrt130) / 197568 #", "(C, fsd(3, (xi, 2))), (D, pm(3, eta)), ] points, weights = untangle(data) weights", "untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest VIII\", weights, points, 5, citation)", "points, 5, citation) def stroud_secrest_08(): nu = sqrt(30) eta = sqrt(10) A =", "* pi return E3rScheme(\"Stroud-Secrest VII\", weights, points, 5, citation) def stroud_secrest_08(): nu =", "= [(A, numpy.array([[0, 0, 0]])), (B, pm_roll(3, [xi, nu]))] points, weights = untangle(data)", "weights = untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest 
VIII\", weights, points,", "*= 8 * pi return E3rScheme(\"Stroud-Secrest VIII\", weights, points, 5, citation) def stroud_secrest_09():", "/ 493920 C = (3204 - 281 * sqrt130) / 197568 # ERR", "+ p_m * 10 * sqrt5 + 10 * sqrt39 - p_m *", "pi, sqrt from ..helpers import article, fsd, pm, pm_roll, untangle from ._helpers import", "2 * sqrt195) for p_m in [+1, -1] ] A = (1725 -", "0]])), (B, fsd(3, (nu, 1))), (C, fsd(3, (xi, 2))), (D, pm(3, eta)), ]", "100) data = [(A, numpy.array([[0, 0, 0]])), (B, fsd(3, (nu, 1))), (C, pm(3,", "= [(A, numpy.array([[0, 0, 0]])), (B, pm(3, eta)), (B, pm_roll(3, [xi, nu]))] points,", "* 10 * sqrt5 + 10 * sqrt39 - p_m * 2 *", "* sqrt5 + 10 * sqrt39 - p_m * 2 * sqrt195) for", "volume=\"17\", year=\"1963\", pages=\"105-135\", url=\"https://doi.org/10.1090/S0025-5718-1963-0161473-0\", ) def stroud_secrest_07(): nu, xi = [sqrt(15 - p_m", "sqrt(39) sqrt195 = sqrt(195) nu, xi = [ sqrt(-50 + p_m * 10", "= sqrt(10) xi, nu = [sqrt(15 - p_m * 5 * sqrt(5)) for", "sqrt5 + 10 * sqrt39 - p_m * 2 * sqrt195) for p_m", "+ 10 * sqrt39 - p_m * 2 * sqrt195) for p_m in", "/ 32928 data = [ (A, numpy.array([[0, 0, 0]])), (B, pm_roll(3, [xi, nu])),", "* pi return E3rScheme(\"Stroud-Secrest VIII\", weights, points, 5, citation) def stroud_secrest_09(): eta =", "title=\"Approximate integration formulas for certain spherically symmetric regions\", journal=\"Math. 
Comp.\", volume=\"17\", year=\"1963\", pages=\"105-135\",", "- 13 * sqrt130) / 8820 B = (3870 + 283 * sqrt130)", "4 * sqrt39) mu, lmbda = [ sqrt(54 + p_m * 18 *", "sqrt(30) eta = sqrt(10) A = frac(3, 5) B = frac(2, 75) C", "13 * sqrt130) / 8820 B = (3870 + 283 * sqrt130) /", "2940 B = (1065 + 171 * sqrt39) / 54880 C = (297", "p_m * 3 * sqrt(5)) for p_m in [+1, -1]] A = frac(3,", "= [(A, numpy.array([[0, 0, 0]])), (B, fsd(3, (nu, 1))), (C, pm(3, eta))] points,", "+ 6 * sqrt39 + p_m * 2 * sqrt195) for p_m in", "= frac(2, 75) C = frac(3, 100) data = [(A, numpy.array([[0, 0, 0]])),", "C = (297 - 47 * sqrt39) / 32928 data = [ (A,", "sqrt((-216 + 24 * sqrt130) / 7) A = (5175 - 13 *", "certain spherically symmetric regions\", journal=\"Math. Comp.\", volume=\"17\", year=\"1963\", pages=\"105-135\", url=\"https://doi.org/10.1090/S0025-5718-1963-0161473-0\", ) def stroud_secrest_07():", "- 281 * sqrt130) / 197568 # ERR in Stroud's book: 917568 vs.", "/ 197568 # ERR in Stroud's book: 917568 vs. 197568 D = (4239", "+ 373 * sqrt130) / 197568 data = [ (A, numpy.array([[0, 0, 0]])),", "article, fsd, pm, pm_roll, untangle from ._helpers import E3rScheme citation = article( authors=[\"<NAME>\",", "(B, pm(3, eta)), (B, pm_roll(3, [xi, nu]))] points, weights = untangle(data) weights *=", "as frac from sympy import pi, sqrt from ..helpers import article, fsd, pm,", "47 * sqrt39) / 32928 data = [ (A, numpy.array([[0, 0, 0]])), (B,", "fsd(3, (nu, 1))), (C, fsd(3, (xi, 2))), (D, pm(3, eta)), ] points, weights", "from sympy import Rational as frac from sympy import pi, sqrt from ..helpers", "] points, weights = untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest X\",", "xi = [sqrt(15 - p_m * 3 * sqrt(5)) for p_m in [+1,", "IX\", weights, points, 5, citation) def stroud_secrest_10(): sqrt130 = sqrt(130) nu = sqrt((720", "= frac(3, 5) B = frac(1, 50) data = [(A, numpy.array([[0, 0, 0]])),", "points, weights = untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest 
XI\", weights,", "sqrt(10) A = frac(3, 5) B = frac(2, 75) C = frac(3, 100)", "sqrt130) / 197568 data = [ (A, numpy.array([[0, 0, 0]])), (B, fsd(3, (nu,", "= frac(3, 5) B = frac(1, 30) data = [(A, numpy.array([[0, 0, 0]])),", "/ 7) A = (5175 - 13 * sqrt130) / 8820 B =", "fsd(3, (nu, 1))), (C, pm(3, eta))] points, weights = untangle(data) weights *= 8", "* sqrt195) for p_m in [+1, -1] ] eta = sqrt(36 + 4", "8820 B = (3870 + 283 * sqrt130) / 493920 C = (3204", "pi return E3rScheme(\"Stroud-Secrest VII\", weights, points, 5, citation) def stroud_secrest_08(): nu = sqrt(30)", "* 2 * sqrt195) for p_m in [+1, -1] ] A = (1725", "(C, pm(3, eta))] points, weights = untangle(data) weights *= 8 * pi return", "] points, weights = untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest XI\",", "* sqrt5 + 6 * sqrt39 + p_m * 2 * sqrt195) for", "in [+1, -1]] A = frac(3, 5) B = frac(1, 30) data =", "sqrt39) / 54880 C = (297 - 47 * sqrt39) / 32928 data", "* sqrt39) mu, lmbda = [ sqrt(54 + p_m * 18 * sqrt5", "E3rScheme(\"Stroud-Secrest VII\", weights, points, 5, citation) def stroud_secrest_08(): nu = sqrt(30) eta =", "(C, pm_roll(3, [lmbda, mu])), ] points, weights = untangle(data) weights *= 8 *", "weights *= 8 * pi return E3rScheme(\"Stroud-Secrest X\", weights, points, 7, citation) def", "75) C = frac(3, 100) data = [(A, numpy.array([[0, 0, 0]])), (B, fsd(3,", "* 3 * sqrt(5)) for p_m in [+1, -1]] A = frac(3, 5)", "/ 197568 data = [ (A, numpy.array([[0, 0, 0]])), (B, fsd(3, (nu, 1))),", "untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest X\", weights, points, 7, citation)", "(nu, 1))), (C, pm(3, eta))] points, weights = untangle(data) weights *= 8 *", "pm(3, eta)), ] points, weights = untangle(data) weights *= 8 * pi return", "[ sqrt(54 + p_m * 18 * sqrt5 + 6 * sqrt39 +", "+ 24 * sqrt130) eta = sqrt((-216 + 24 * sqrt130) / 7)", "[+1, -1] ] eta = sqrt(36 + 4 * sqrt39) mu, lmbda =", "(297 - 47 * sqrt39) / 32928 data = [ (A, numpy.array([[0, 0,", 
"stroud_secrest_11(): sqrt5 = sqrt(5) sqrt39 = sqrt(39) sqrt195 = sqrt(195) nu, xi =", "article( authors=[\"<NAME>\", \"<NAME>\"], title=\"Approximate integration formulas for certain spherically symmetric regions\", journal=\"Math. Comp.\",", "sqrt39 + p_m * 2 * sqrt195) for p_m in [+1, -1] ]", "fsd, pm, pm_roll, untangle from ._helpers import E3rScheme citation = article( authors=[\"<NAME>\", \"<NAME>\"],", "sqrt(195) nu, xi = [ sqrt(-50 + p_m * 10 * sqrt5 +", "* sqrt39 + p_m * 2 * sqrt195) for p_m in [+1, -1]", "/ 2940 B = (1065 + 171 * sqrt39) / 54880 C =", "- 24 * sqrt130) / 11) xi = sqrt(288 + 24 * sqrt130)", "stroud_secrest_09(): eta = sqrt(10) xi, nu = [sqrt(15 - p_m * 5 *", "citation) def stroud_secrest_10(): sqrt130 = sqrt(130) nu = sqrt((720 - 24 * sqrt130)", "(1725 - 26 * sqrt39) / 2940 B = (1065 + 171 *", "authors=[\"<NAME>\", \"<NAME>\"], title=\"Approximate integration formulas for certain spherically symmetric regions\", journal=\"Math. Comp.\", volume=\"17\",", "sqrt(5)) for p_m in [+1, -1]] A = frac(3, 5) B = frac(1,", "= sqrt((-216 + 24 * sqrt130) / 7) A = (5175 - 13", "numpy.array([[0, 0, 0]])), (B, pm_roll(3, [xi, nu]))] points, weights = untangle(data) weights *=", "50) data = [(A, numpy.array([[0, 0, 0]])), (B, pm(3, eta)), (B, pm_roll(3, [xi,", "stroud_secrest_08(): nu = sqrt(30) eta = sqrt(10) A = frac(3, 5) B =", "* sqrt130) eta = sqrt((-216 + 24 * sqrt130) / 7) A =", "18 * sqrt5 + 6 * sqrt39 + p_m * 2 * sqrt195)", "= (4239 + 373 * sqrt130) / 197568 data = [ (A, numpy.array([[0,", "return E3rScheme(\"Stroud-Secrest IX\", weights, points, 5, citation) def stroud_secrest_10(): sqrt130 = sqrt(130) nu", "numpy.array([[0, 0, 0]])), (B, pm_roll(3, [xi, nu])), (C, pm(3, eta)), (C, pm_roll(3, [lmbda,", "[ sqrt(-50 + p_m * 10 * sqrt5 + 10 * sqrt39 -", "frac(3, 100) data = [(A, numpy.array([[0, 0, 0]])), (B, fsd(3, (nu, 1))), (C,", "points, 5, citation) def stroud_secrest_10(): sqrt130 = sqrt(130) nu = sqrt((720 - 24", "B = frac(1, 30) data = [(A, 
numpy.array([[0, 0, 0]])), (B, pm_roll(3, [xi,", "8 * pi return E3rScheme(\"Stroud-Secrest IX\", weights, points, 5, citation) def stroud_secrest_10(): sqrt130", "24 * sqrt130) / 7) A = (5175 - 13 * sqrt130) /", "= sqrt(5) sqrt39 = sqrt(39) sqrt195 = sqrt(195) nu, xi = [ sqrt(-50", "E3rScheme(\"Stroud-Secrest VIII\", weights, points, 5, citation) def stroud_secrest_09(): eta = sqrt(10) xi, nu", "sqrt130) / 8820 B = (3870 + 283 * sqrt130) / 493920 C", "eta = sqrt(36 + 4 * sqrt39) mu, lmbda = [ sqrt(54 +", "373 * sqrt130) / 197568 data = [ (A, numpy.array([[0, 0, 0]])), (B,", "26 * sqrt39) / 2940 B = (1065 + 171 * sqrt39) /", "..helpers import article, fsd, pm, pm_roll, untangle from ._helpers import E3rScheme citation =", "7) A = (5175 - 13 * sqrt130) / 8820 B = (3870", "frac from sympy import pi, sqrt from ..helpers import article, fsd, pm, pm_roll,", "= frac(3, 5) B = frac(2, 75) C = frac(3, 100) data =", "weights, points, 7, citation) def stroud_secrest_11(): sqrt5 = sqrt(5) sqrt39 = sqrt(39) sqrt195", "* sqrt130) / 8820 B = (3870 + 283 * sqrt130) / 493920", "+ p_m * 18 * sqrt5 + 6 * sqrt39 + p_m *", "(5175 - 13 * sqrt130) / 8820 B = (3870 + 283 *", "0, 0]])), (B, pm_roll(3, [xi, nu])), (C, pm(3, eta)), (C, pm_roll(3, [lmbda, mu])),", "weights, points, 5, citation) def stroud_secrest_09(): eta = sqrt(10) xi, nu = [sqrt(15", "E3rScheme(\"Stroud-Secrest IX\", weights, points, 5, citation) def stroud_secrest_10(): sqrt130 = sqrt(130) nu =", "197568 # ERR in Stroud's book: 917568 vs. 
197568 D = (4239 +", "for p_m in [+1, -1] ] eta = sqrt(36 + 4 * sqrt39)", "nu])), (C, pm(3, eta)), (C, pm_roll(3, [lmbda, mu])), ] points, weights = untangle(data)", "sqrt130) / 7) A = (5175 - 13 * sqrt130) / 8820 B", "import pi, sqrt from ..helpers import article, fsd, pm, pm_roll, untangle from ._helpers", "8 * pi return E3rScheme(\"Stroud-Secrest X\", weights, points, 7, citation) def stroud_secrest_11(): sqrt5", "* pi return E3rScheme(\"Stroud-Secrest IX\", weights, points, 5, citation) def stroud_secrest_10(): sqrt130 =", "* sqrt39) / 2940 B = (1065 + 171 * sqrt39) / 54880", "numpy.array([[0, 0, 0]])), (B, fsd(3, (nu, 1))), (C, fsd(3, (xi, 2))), (D, pm(3,", "data = [ (A, numpy.array([[0, 0, 0]])), (B, pm_roll(3, [xi, nu])), (C, pm(3,", "(B, pm_roll(3, [xi, nu])), (C, pm(3, eta)), (C, pm_roll(3, [lmbda, mu])), ] points,", "import numpy from sympy import Rational as frac from sympy import pi, sqrt", "data = [ (A, numpy.array([[0, 0, 0]])), (B, fsd(3, (nu, 1))), (C, fsd(3,", "._helpers import E3rScheme citation = article( authors=[\"<NAME>\", \"<NAME>\"], title=\"Approximate integration formulas for certain", "import Rational as frac from sympy import pi, sqrt from ..helpers import article,", "(1065 + 171 * sqrt39) / 54880 C = (297 - 47 *", "7, citation) def stroud_secrest_11(): sqrt5 = sqrt(5) sqrt39 = sqrt(39) sqrt195 = sqrt(195)", "pi return E3rScheme(\"Stroud-Secrest VIII\", weights, points, 5, citation) def stroud_secrest_09(): eta = sqrt(10)", "24 * sqrt130) eta = sqrt((-216 + 24 * sqrt130) / 7) A", "32928 data = [ (A, numpy.array([[0, 0, 0]])), (B, pm_roll(3, [xi, nu])), (C,", "points, weights = untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest X\", weights,", "stroud_secrest_07(): nu, xi = [sqrt(15 - p_m * 3 * sqrt(5)) for p_m", "*= 8 * pi return E3rScheme(\"Stroud-Secrest VII\", weights, points, 5, citation) def stroud_secrest_08():", "numpy.array([[0, 0, 0]])), (B, fsd(3, (nu, 1))), (C, pm(3, eta))] points, weights =", "[(A, 
numpy.array([[0, 0, 0]])), (B, pm_roll(3, [xi, nu]))] points, weights = untangle(data) weights", "= (1725 - 26 * sqrt39) / 2940 B = (1065 + 171", "xi = sqrt(288 + 24 * sqrt130) eta = sqrt((-216 + 24 *", "* sqrt130) / 197568 # ERR in Stroud's book: 917568 vs. 197568 D", "sqrt from ..helpers import article, fsd, pm, pm_roll, untangle from ._helpers import E3rScheme", "0, 0]])), (B, pm(3, eta)), (B, pm_roll(3, [xi, nu]))] points, weights = untangle(data)", "-1]] A = frac(3, 5) B = frac(1, 50) data = [(A, numpy.array([[0,", "= [ (A, numpy.array([[0, 0, 0]])), (B, pm_roll(3, [xi, nu])), (C, pm(3, eta)),", "[+1, -1]] A = frac(3, 5) B = frac(1, 30) data = [(A,", "frac(3, 5) B = frac(1, 30) data = [(A, numpy.array([[0, 0, 0]])), (B,", "in [+1, -1]] A = frac(3, 5) B = frac(1, 50) data =", "data = [(A, numpy.array([[0, 0, 0]])), (B, pm_roll(3, [xi, nu]))] points, weights =", "stroud_secrest_10(): sqrt130 = sqrt(130) nu = sqrt((720 - 24 * sqrt130) / 11)", "sqrt39) / 32928 data = [ (A, numpy.array([[0, 0, 0]])), (B, pm_roll(3, [xi,", "= sqrt(288 + 24 * sqrt130) eta = sqrt((-216 + 24 * sqrt130)", "10 * sqrt5 + 10 * sqrt39 - p_m * 2 * sqrt195)", "[xi, nu]))] points, weights = untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest", "p_m in [+1, -1] ] A = (1725 - 26 * sqrt39) /", "import E3rScheme citation = article( authors=[\"<NAME>\", \"<NAME>\"], title=\"Approximate integration formulas for certain spherically", "3 * sqrt(5)) for p_m in [+1, -1]] A = frac(3, 5) B", "sqrt130) eta = sqrt((-216 + 24 * sqrt130) / 7) A = (5175", "points, weights = untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest VII\", weights,", "def stroud_secrest_11(): sqrt5 = sqrt(5) sqrt39 = sqrt(39) sqrt195 = sqrt(195) nu, xi", "sqrt195) for p_m in [+1, -1] ] A = (1725 - 26 *", "symmetric regions\", journal=\"Math. 
Comp.\", volume=\"17\", year=\"1963\", pages=\"105-135\", url=\"https://doi.org/10.1090/S0025-5718-1963-0161473-0\", ) def stroud_secrest_07(): nu, xi", "283 * sqrt130) / 493920 C = (3204 - 281 * sqrt130) /", "* sqrt130) / 7) A = (5175 - 13 * sqrt130) / 8820", "sqrt130) / 11) xi = sqrt(288 + 24 * sqrt130) eta = sqrt((-216", "0, 0]])), (B, fsd(3, (nu, 1))), (C, fsd(3, (xi, 2))), (D, pm(3, eta)),", "frac(1, 30) data = [(A, numpy.array([[0, 0, 0]])), (B, pm_roll(3, [xi, nu]))] points,", "(A, numpy.array([[0, 0, 0]])), (B, fsd(3, (nu, 1))), (C, fsd(3, (xi, 2))), (D,", "5) B = frac(1, 50) data = [(A, numpy.array([[0, 0, 0]])), (B, pm(3,", "sqrt5 + 6 * sqrt39 + p_m * 2 * sqrt195) for p_m", "* sqrt39) / 32928 data = [ (A, numpy.array([[0, 0, 0]])), (B, pm_roll(3,", "spherically symmetric regions\", journal=\"Math. Comp.\", volume=\"17\", year=\"1963\", pages=\"105-135\", url=\"https://doi.org/10.1090/S0025-5718-1963-0161473-0\", ) def stroud_secrest_07(): nu,", "pm_roll, untangle from ._helpers import E3rScheme citation = article( authors=[\"<NAME>\", \"<NAME>\"], title=\"Approximate integration", "url=\"https://doi.org/10.1090/S0025-5718-1963-0161473-0\", ) def stroud_secrest_07(): nu, xi = [sqrt(15 - p_m * 3 *", ") def stroud_secrest_07(): nu, xi = [sqrt(15 - p_m * 3 * sqrt(5))", "C = frac(3, 100) data = [(A, numpy.array([[0, 0, 0]])), (B, fsd(3, (nu,", "pm(3, eta))] points, weights = untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest", "citation) def stroud_secrest_09(): eta = sqrt(10) xi, nu = [sqrt(15 - p_m *", "] eta = sqrt(36 + 4 * sqrt39) mu, lmbda = [ sqrt(54", "pm(3, eta)), (B, pm_roll(3, [xi, nu]))] points, weights = untangle(data) weights *= 8", "pages=\"105-135\", url=\"https://doi.org/10.1090/S0025-5718-1963-0161473-0\", ) def stroud_secrest_07(): nu, xi = [sqrt(15 - p_m * 3", "lmbda = [ sqrt(54 + p_m * 18 * sqrt5 + 6 *", "from sympy import pi, sqrt from ..helpers import article, fsd, pm, pm_roll, untangle", "def stroud_secrest_07(): nu, xi = 
[sqrt(15 - p_m * 3 * sqrt(5)) for", "frac(2, 75) C = frac(3, 100) data = [(A, numpy.array([[0, 0, 0]])), (B,", "journal=\"Math. Comp.\", volume=\"17\", year=\"1963\", pages=\"105-135\", url=\"https://doi.org/10.1090/S0025-5718-1963-0161473-0\", ) def stroud_secrest_07(): nu, xi = [sqrt(15", "citation) def stroud_secrest_11(): sqrt5 = sqrt(5) sqrt39 = sqrt(39) sqrt195 = sqrt(195) nu,", "-1] ] eta = sqrt(36 + 4 * sqrt39) mu, lmbda = [", "Rational as frac from sympy import pi, sqrt from ..helpers import article, fsd,", "from ._helpers import E3rScheme citation = article( authors=[\"<NAME>\", \"<NAME>\"], title=\"Approximate integration formulas for", "= sqrt(30) eta = sqrt(10) A = frac(3, 5) B = frac(2, 75)", "pm_roll(3, [lmbda, mu])), ] points, weights = untangle(data) weights *= 8 * pi", "54880 C = (297 - 47 * sqrt39) / 32928 data = [", "A = frac(3, 5) B = frac(2, 75) C = frac(3, 100) data", "integration formulas for certain spherically symmetric regions\", journal=\"Math. Comp.\", volume=\"17\", year=\"1963\", pages=\"105-135\", url=\"https://doi.org/10.1090/S0025-5718-1963-0161473-0\",", "- p_m * 5 * sqrt(5)) for p_m in [+1, -1]] A =", "weights *= 8 * pi return E3rScheme(\"Stroud-Secrest VIII\", weights, points, 5, citation) def", "pm_roll(3, [xi, nu]))] points, weights = untangle(data) weights *= 8 * pi return", "X\", weights, points, 7, citation) def stroud_secrest_11(): sqrt5 = sqrt(5) sqrt39 = sqrt(39)", "5) B = frac(1, 30) data = [(A, numpy.array([[0, 0, 0]])), (B, pm_roll(3,", "sqrt39 = sqrt(39) sqrt195 = sqrt(195) nu, xi = [ sqrt(-50 + p_m", "(D, pm(3, eta)), ] points, weights = untangle(data) weights *= 8 * pi", "weights = untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest XI\", weights, points,", "* sqrt130) / 197568 data = [ (A, numpy.array([[0, 0, 0]])), (B, fsd(3,", "book: 917568 vs. 197568 D = (4239 + 373 * sqrt130) / 197568", "* 2 * sqrt195) for p_m in [+1, -1] ] eta = sqrt(36", "# ERR in Stroud's book: 917568 vs. 
197568 D = (4239 + 373", "[+1, -1]] A = frac(3, 5) B = frac(1, 50) data = [(A,", "2))), (D, pm(3, eta)), ] points, weights = untangle(data) weights *= 8 *", "pi return E3rScheme(\"Stroud-Secrest X\", weights, points, 7, citation) def stroud_secrest_11(): sqrt5 = sqrt(5)", "pm_roll(3, [xi, nu])), (C, pm(3, eta)), (C, pm_roll(3, [lmbda, mu])), ] points, weights", "(xi, 2))), (D, pm(3, eta)), ] points, weights = untangle(data) weights *= 8", "sqrt5 = sqrt(5) sqrt39 = sqrt(39) sqrt195 = sqrt(195) nu, xi = [", "0]])), (B, fsd(3, (nu, 1))), (C, pm(3, eta))] points, weights = untangle(data) weights", "5) B = frac(2, 75) C = frac(3, 100) data = [(A, numpy.array([[0,", "5, citation) def stroud_secrest_09(): eta = sqrt(10) xi, nu = [sqrt(15 - p_m", "sqrt130) / 197568 # ERR in Stroud's book: 917568 vs. 197568 D =", "nu]))] points, weights = untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest VII\",", "eta)), ] points, weights = untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest", "sqrt(54 + p_m * 18 * sqrt5 + 6 * sqrt39 + p_m", "11) xi = sqrt(288 + 24 * sqrt130) eta = sqrt((-216 + 24", "def stroud_secrest_08(): nu = sqrt(30) eta = sqrt(10) A = frac(3, 5) B", "(C, pm(3, eta)), (C, pm_roll(3, [lmbda, mu])), ] points, weights = untangle(data) weights", "VII\", weights, points, 5, citation) def stroud_secrest_08(): nu = sqrt(30) eta = sqrt(10)", "import article, fsd, pm, pm_roll, untangle from ._helpers import E3rScheme citation = article(", "nu]))] points, weights = untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest IX\",", "p_m * 2 * sqrt195) for p_m in [+1, -1] ] A =", "points, 5, citation) def stroud_secrest_09(): eta = sqrt(10) xi, nu = [sqrt(15 -", "= [ sqrt(-50 + p_m * 10 * sqrt5 + 10 * sqrt39", "\"<NAME>\"], title=\"Approximate integration formulas for certain spherically symmetric regions\", journal=\"Math. 
Comp.\", volume=\"17\", year=\"1963\",", "171 * sqrt39) / 54880 C = (297 - 47 * sqrt39) /", "/ 54880 C = (297 - 47 * sqrt39) / 32928 data =", "untangle(data) weights *= 8 * pi return E3rScheme(\"Stroud-Secrest IX\", weights, points, 5, citation)", "fsd(3, (xi, 2))), (D, pm(3, eta)), ] points, weights = untangle(data) weights *=", "493920 C = (3204 - 281 * sqrt130) / 197568 # ERR in", "/ 8820 B = (3870 + 283 * sqrt130) / 493920 C =", "return E3rScheme(\"Stroud-Secrest X\", weights, points, 7, citation) def stroud_secrest_11(): sqrt5 = sqrt(5) sqrt39", "for p_m in [+1, -1]] A = frac(3, 5) B = frac(1, 50)", "(nu, 1))), (C, fsd(3, (xi, 2))), (D, pm(3, eta)), ] points, weights =" ]
[ "or SafeOP.eq(a,b) @staticmethod def le(a,b): return SafeOP.lt(a,b) or SafeOP.eq(a,b) @staticmethod def ne(a,b): return", "\"abiFilters\": OPTIONAL [ { name STRING REQUIRED rvalue HEXSTR OR INT REQUIRED type", "def __init__(self,fltr): self._name = fltr[\"name\"] type_factory = getattr(Types,fltr[\"type\"]) self._rvalue = type_factory(fltr[\"rvalue\"]) self._type_factory =", "= fltr.get(\"addressFilter\",None) self._addressFilter = AddressFilter(af) if af else None self._abiFilters = [ABIFilter(f) for", "log = logging.getLogger(__name__) \"\"\" { \"addressFilter\":<address> OPTIONAL, \"bnFilter\":<uint256> OPTIONAL, \"abiFilters\": OPTIONAL [ {", "return all([abitest,addrtest,bntest]) @property def callback(self): return self._callback class AddressFilter(object): __slots__=[\"_value\"] def __init__(self,value): self._value", "value def test(self,rvalue): return self._value == rvalue class BNFilter(object): __slots__=[\"_rvalue\",\"_op\"] def __init__(self,rvalue,op): self._rvalue", "type_factory self._op = fltr[\"op\"] assert SafeOP.ops(self._op) def test(self,event): lvalue = event[self._name] lvalue =", "import logging log = logging.getLogger(__name__) \"\"\" { \"addressFilter\":<address> OPTIONAL, \"bnFilter\":<uint256> OPTIONAL, \"abiFilters\": OPTIONAL", "self._callback class AddressFilter(object): __slots__=[\"_value\"] def __init__(self,value): self._value = value def test(self,rvalue): return self._value", "type STRING REQUIRED op STRING REQUIRED } ] } \"\"\" #all and logic", "self._topic async def test(self, event_log, event): if self._abiFilters: abitest = all([filtr.test(event) for filtr", "f in fltr.get(\"abiFilters\",[])] @property def topic(self): return self._topic async def test(self, event_log, event):", "else True return all([abitest,addrtest,bntest]) @property def callback(self): return self._callback class AddressFilter(object): __slots__=[\"_value\"] def", "from .types import Types import logging log = logging.getLogger(__name__) \"\"\" { 
\"addressFilter\":<address> OPTIONAL,", "__slots__=[\"_rvalue\",\"_op\"] def __init__(self,rvalue,op): self._rvalue = rvalue Types.checkUint256(self._rvalue) self._op = op assert SafeOP.ops(self._op) def", "event): if self._abiFilters: abitest = all([filtr.test(event) for filtr in self._abiFilters]) else: abitest =", "bool return test class ABIFilter(object): __slots__=[\"_rvalue\",\"_op\",\"_name\",\"_type\"] def __init__(self,fltr): self._name = fltr[\"name\"] type_factory =", "assert SafeOP.ops(self._op) def test(self,event): lvalue = event[self._name] lvalue = self._type_factory(lvalue) result = getattr(SafeOP,self._op)(event[self._name],self._rvalue)", "@staticmethod def ops(op): return op in ['eq','gt','lt','ge','le','ne',] @staticmethod def eq(a,b): return a ==", "class SafeOP(object): slots = ['_ops'] @staticmethod def ops(op): return op in ['eq','gt','lt','ge','le','ne',] @staticmethod", "a == b @staticmethod def gt(a,b): return a > b @staticmethod def lt(a,b):", "= fltr[\"name\"] type_factory = getattr(Types,fltr[\"type\"]) self._rvalue = type_factory(fltr[\"rvalue\"]) self._type_factory = type_factory self._op =", "<gh_stars>1-10 from .types import Types import logging log = logging.getLogger(__name__) \"\"\" { \"addressFilter\":<address>", "STRING REQUIRED rvalue HEXSTR OR INT REQUIRED type STRING REQUIRED op STRING REQUIRED", "REQUIRED type STRING REQUIRED op STRING REQUIRED } ] } \"\"\" #all and", "= fltr.get(\"bnFilter\",None) self._bnFilter = BNFilter(bf) if bf else None af = fltr.get(\"addressFilter\",None) self._addressFilter", "def __init__(self,rvalue,op): self._rvalue = rvalue Types.checkUint256(self._rvalue) self._op = op assert SafeOP.ops(self._op) def test(self,event_log):", "self._bnFilter else True return all([abitest,addrtest,bntest]) @property def callback(self): return self._callback class AddressFilter(object): __slots__=[\"_value\"]", "__init__(self,fltr): self._name = fltr[\"name\"] type_factory = getattr(Types,fltr[\"type\"]) 
self._rvalue = type_factory(fltr[\"rvalue\"]) self._type_factory = type_factory", "def test(self,event): lvalue = event[self._name] lvalue = self._type_factory(lvalue) result = getattr(SafeOP,self._op)(event[self._name],self._rvalue) return result", "@staticmethod def le(a,b): return SafeOP.lt(a,b) or SafeOP.eq(a,b) @staticmethod def ne(a,b): return not SafeOP.eq(a,b)", "slots = ['_ops'] @staticmethod def ops(op): return op in ['eq','gt','lt','ge','le','ne',] @staticmethod def eq(a,b):", ".types import Types import logging log = logging.getLogger(__name__) \"\"\" { \"addressFilter\":<address> OPTIONAL, \"bnFilter\":<uint256>", "= BNFilter(bf) if bf else None af = fltr.get(\"addressFilter\",None) self._addressFilter = AddressFilter(af) if", "import Types import logging log = logging.getLogger(__name__) \"\"\" { \"addressFilter\":<address> OPTIONAL, \"bnFilter\":<uint256> OPTIONAL,", "if self._abiFilters: abitest = all([filtr.test(event) for filtr in self._abiFilters]) else: abitest = True", "__init__(self,value): self._value = value def test(self,rvalue): return self._value == rvalue class BNFilter(object): __slots__=[\"_rvalue\",\"_op\"]", "assert SafeOP.ops(self._op) def test(self,event_log): test = getattr(SafeOP,self._op)(event_log.blockNumber, self._rvalue) assert type(test) is bool return", "rvalue class BNFilter(object): __slots__=[\"_rvalue\",\"_op\"] def __init__(self,rvalue,op): self._rvalue = rvalue Types.checkUint256(self._rvalue) self._op = op", "fltr[\"name\"] type_factory = getattr(Types,fltr[\"type\"]) self._rvalue = type_factory(fltr[\"rvalue\"]) self._type_factory = type_factory self._op = fltr[\"op\"]", "bf = fltr.get(\"bnFilter\",None) self._bnFilter = BNFilter(bf) if bf else None af = fltr.get(\"addressFilter\",None)", "rvalue HEXSTR OR INT REQUIRED type STRING REQUIRED op STRING REQUIRED } ]", "= getattr(SafeOP,self._op)(event_log.blockNumber, self._rvalue) assert type(test) is bool return test class ABIFilter(object): 
__slots__=[\"_rvalue\",\"_op\",\"_name\",\"_type\"] def", "AddressFilter(object): __slots__=[\"_value\"] def __init__(self,value): self._value = value def test(self,rvalue): return self._value == rvalue", "\"\"\" #all and logic mapped class EventFilter(object): __slots__=[\"_abiFilters\",\"_bnFilter\",\"_addressFilter\",\"_callback\"] def __init__(self,fltr,callback): self._callback = callback", "= True addrtest = self._addressFilter.test(event_log.address) if self._addressFilter else True bntest = self._bnFilter.test(event_log.blockNumber) if", "return SafeOP.gt(a,b) or SafeOP.eq(a,b) @staticmethod def le(a,b): return SafeOP.lt(a,b) or SafeOP.eq(a,b) @staticmethod def", "REQUIRED } ] } \"\"\" #all and logic mapped class EventFilter(object): __slots__=[\"_abiFilters\",\"_bnFilter\",\"_addressFilter\",\"_callback\"] def", "STRING REQUIRED } ] } \"\"\" #all and logic mapped class EventFilter(object): __slots__=[\"_abiFilters\",\"_bnFilter\",\"_addressFilter\",\"_callback\"]", "AddressFilter(af) if af else None self._abiFilters = [ABIFilter(f) for f in fltr.get(\"abiFilters\",[])] @property", "getattr(Types,fltr[\"type\"]) self._rvalue = type_factory(fltr[\"rvalue\"]) self._type_factory = type_factory self._op = fltr[\"op\"] assert SafeOP.ops(self._op) def", "== rvalue class BNFilter(object): __slots__=[\"_rvalue\",\"_op\"] def __init__(self,rvalue,op): self._rvalue = rvalue Types.checkUint256(self._rvalue) self._op =", "= type_factory self._op = fltr[\"op\"] assert SafeOP.ops(self._op) def test(self,event): lvalue = event[self._name] lvalue", "type_factory(fltr[\"rvalue\"]) self._type_factory = type_factory self._op = fltr[\"op\"] assert SafeOP.ops(self._op) def test(self,event): lvalue =", "def eq(a,b): return a == b @staticmethod def gt(a,b): return a > b", "['_ops'] @staticmethod def ops(op): return op in ['eq','gt','lt','ge','le','ne',] @staticmethod def eq(a,b): return a", "eq(a,b): return a == b @staticmethod def gt(a,b): return a > b @staticmethod", "b 
@staticmethod def lt(a,b): return not SafeOP.gt(a,b) @staticmethod def ge(a,b): return SafeOP.gt(a,b) or", "class AddressFilter(object): __slots__=[\"_value\"] def __init__(self,value): self._value = value def test(self,rvalue): return self._value ==", "return not SafeOP.gt(a,b) @staticmethod def ge(a,b): return SafeOP.gt(a,b) or SafeOP.eq(a,b) @staticmethod def le(a,b):", "= AddressFilter(af) if af else None self._abiFilters = [ABIFilter(f) for f in fltr.get(\"abiFilters\",[])]", "= getattr(SafeOP,self._op)(event[self._name],self._rvalue) return result class SafeOP(object): slots = ['_ops'] @staticmethod def ops(op): return", "self._rvalue = type_factory(fltr[\"rvalue\"]) self._type_factory = type_factory self._op = fltr[\"op\"] assert SafeOP.ops(self._op) def test(self,event):", "result = getattr(SafeOP,self._op)(event[self._name],self._rvalue) return result class SafeOP(object): slots = ['_ops'] @staticmethod def ops(op):", "self._name = fltr[\"name\"] type_factory = getattr(Types,fltr[\"type\"]) self._rvalue = type_factory(fltr[\"rvalue\"]) self._type_factory = type_factory self._op", "ge(a,b): return SafeOP.gt(a,b) or SafeOP.eq(a,b) @staticmethod def le(a,b): return SafeOP.lt(a,b) or SafeOP.eq(a,b) @staticmethod", "= event[self._name] lvalue = self._type_factory(lvalue) result = getattr(SafeOP,self._op)(event[self._name],self._rvalue) return result class SafeOP(object): slots", "self._addressFilter.test(event_log.address) if self._addressFilter else True bntest = self._bnFilter.test(event_log.blockNumber) if self._bnFilter else True return", "self._rvalue = rvalue Types.checkUint256(self._rvalue) self._op = op assert SafeOP.ops(self._op) def test(self,event_log): test =", "Types import logging log = logging.getLogger(__name__) \"\"\" { \"addressFilter\":<address> OPTIONAL, \"bnFilter\":<uint256> OPTIONAL, \"abiFilters\":", "def test(self,event_log): test = getattr(SafeOP,self._op)(event_log.blockNumber, self._rvalue) assert type(test) is bool return test 
class", "if self._bnFilter else True return all([abitest,addrtest,bntest]) @property def callback(self): return self._callback class AddressFilter(object):", "True return all([abitest,addrtest,bntest]) @property def callback(self): return self._callback class AddressFilter(object): __slots__=[\"_value\"] def __init__(self,value):", "return result class SafeOP(object): slots = ['_ops'] @staticmethod def ops(op): return op in", "test(self, event_log, event): if self._abiFilters: abitest = all([filtr.test(event) for filtr in self._abiFilters]) else:", "SafeOP.ops(self._op) def test(self,event_log): test = getattr(SafeOP,self._op)(event_log.blockNumber, self._rvalue) assert type(test) is bool return test", "SafeOP.ops(self._op) def test(self,event): lvalue = event[self._name] lvalue = self._type_factory(lvalue) result = getattr(SafeOP,self._op)(event[self._name],self._rvalue) return", "def test(self, event_log, event): if self._abiFilters: abitest = all([filtr.test(event) for filtr in self._abiFilters])", "BNFilter(bf) if bf else None af = fltr.get(\"addressFilter\",None) self._addressFilter = AddressFilter(af) if af", "def test(self,rvalue): return self._value == rvalue class BNFilter(object): __slots__=[\"_rvalue\",\"_op\"] def __init__(self,rvalue,op): self._rvalue =", "lvalue = event[self._name] lvalue = self._type_factory(lvalue) result = getattr(SafeOP,self._op)(event[self._name],self._rvalue) return result class SafeOP(object):", "else: abitest = True addrtest = self._addressFilter.test(event_log.address) if self._addressFilter else True bntest =", "test(self,rvalue): return self._value == rvalue class BNFilter(object): __slots__=[\"_rvalue\",\"_op\"] def __init__(self,rvalue,op): self._rvalue = rvalue", "self._rvalue) assert type(test) is bool return test class ABIFilter(object): __slots__=[\"_rvalue\",\"_op\",\"_name\",\"_type\"] def __init__(self,fltr): self._name", "SafeOP.gt(a,b) @staticmethod def ge(a,b): return SafeOP.gt(a,b) or SafeOP.eq(a,b) 
@staticmethod def le(a,b): return SafeOP.lt(a,b)", "class EventFilter(object): __slots__=[\"_abiFilters\",\"_bnFilter\",\"_addressFilter\",\"_callback\"] def __init__(self,fltr,callback): self._callback = callback bf = fltr.get(\"bnFilter\",None) self._bnFilter =", "logic mapped class EventFilter(object): __slots__=[\"_abiFilters\",\"_bnFilter\",\"_addressFilter\",\"_callback\"] def __init__(self,fltr,callback): self._callback = callback bf = fltr.get(\"bnFilter\",None)", "== b @staticmethod def gt(a,b): return a > b @staticmethod def lt(a,b): return", "__slots__=[\"_rvalue\",\"_op\",\"_name\",\"_type\"] def __init__(self,fltr): self._name = fltr[\"name\"] type_factory = getattr(Types,fltr[\"type\"]) self._rvalue = type_factory(fltr[\"rvalue\"]) self._type_factory", "= getattr(Types,fltr[\"type\"]) self._rvalue = type_factory(fltr[\"rvalue\"]) self._type_factory = type_factory self._op = fltr[\"op\"] assert SafeOP.ops(self._op)", "if af else None self._abiFilters = [ABIFilter(f) for f in fltr.get(\"abiFilters\",[])] @property def", "in self._abiFilters]) else: abitest = True addrtest = self._addressFilter.test(event_log.address) if self._addressFilter else True", "= op assert SafeOP.ops(self._op) def test(self,event_log): test = getattr(SafeOP,self._op)(event_log.blockNumber, self._rvalue) assert type(test) is", "test = getattr(SafeOP,self._op)(event_log.blockNumber, self._rvalue) assert type(test) is bool return test class ABIFilter(object): __slots__=[\"_rvalue\",\"_op\",\"_name\",\"_type\"]", "fltr.get(\"bnFilter\",None) self._bnFilter = BNFilter(bf) if bf else None af = fltr.get(\"addressFilter\",None) self._addressFilter =", "self._type_factory = type_factory self._op = fltr[\"op\"] assert SafeOP.ops(self._op) def test(self,event): lvalue = event[self._name]", "logging.getLogger(__name__) \"\"\" { \"addressFilter\":<address> OPTIONAL, \"bnFilter\":<uint256> OPTIONAL, \"abiFilters\": OPTIONAL [ { name STRING", "= fltr[\"op\"] assert SafeOP.ops(self._op) def 
test(self,event): lvalue = event[self._name] lvalue = self._type_factory(lvalue) result", "self._bnFilter.test(event_log.blockNumber) if self._bnFilter else True return all([abitest,addrtest,bntest]) @property def callback(self): return self._callback class", "type_factory = getattr(Types,fltr[\"type\"]) self._rvalue = type_factory(fltr[\"rvalue\"]) self._type_factory = type_factory self._op = fltr[\"op\"] assert", "assert type(test) is bool return test class ABIFilter(object): __slots__=[\"_rvalue\",\"_op\",\"_name\",\"_type\"] def __init__(self,fltr): self._name =", "= logging.getLogger(__name__) \"\"\" { \"addressFilter\":<address> OPTIONAL, \"bnFilter\":<uint256> OPTIONAL, \"abiFilters\": OPTIONAL [ { name", "in fltr.get(\"abiFilters\",[])] @property def topic(self): return self._topic async def test(self, event_log, event): if", "b @staticmethod def gt(a,b): return a > b @staticmethod def lt(a,b): return not", "bntest = self._bnFilter.test(event_log.blockNumber) if self._bnFilter else True return all([abitest,addrtest,bntest]) @property def callback(self): return", "return self._callback class AddressFilter(object): __slots__=[\"_value\"] def __init__(self,value): self._value = value def test(self,rvalue): return", "__init__(self,rvalue,op): self._rvalue = rvalue Types.checkUint256(self._rvalue) self._op = op assert SafeOP.ops(self._op) def test(self,event_log): test", "def ops(op): return op in ['eq','gt','lt','ge','le','ne',] @staticmethod def eq(a,b): return a == b", "@staticmethod def lt(a,b): return not SafeOP.gt(a,b) @staticmethod def ge(a,b): return SafeOP.gt(a,b) or SafeOP.eq(a,b)", "test class ABIFilter(object): __slots__=[\"_rvalue\",\"_op\",\"_name\",\"_type\"] def __init__(self,fltr): self._name = fltr[\"name\"] type_factory = getattr(Types,fltr[\"type\"]) self._rvalue", "async def test(self, event_log, event): if self._abiFilters: abitest = all([filtr.test(event) for filtr in", "af else None self._abiFilters = [ABIFilter(f) for f in 
fltr.get(\"abiFilters\",[])] @property def topic(self):", "op STRING REQUIRED } ] } \"\"\" #all and logic mapped class EventFilter(object):", "test(self,event_log): test = getattr(SafeOP,self._op)(event_log.blockNumber, self._rvalue) assert type(test) is bool return test class ABIFilter(object):", "getattr(SafeOP,self._op)(event_log.blockNumber, self._rvalue) assert type(test) is bool return test class ABIFilter(object): __slots__=[\"_rvalue\",\"_op\",\"_name\",\"_type\"] def __init__(self,fltr):", "return op in ['eq','gt','lt','ge','le','ne',] @staticmethod def eq(a,b): return a == b @staticmethod def", "@staticmethod def eq(a,b): return a == b @staticmethod def gt(a,b): return a >", "all([filtr.test(event) for filtr in self._abiFilters]) else: abitest = True addrtest = self._addressFilter.test(event_log.address) if", "abitest = all([filtr.test(event) for filtr in self._abiFilters]) else: abitest = True addrtest =", "= value def test(self,rvalue): return self._value == rvalue class BNFilter(object): __slots__=[\"_rvalue\",\"_op\"] def __init__(self,rvalue,op):", "def topic(self): return self._topic async def test(self, event_log, event): if self._abiFilters: abitest =", "for f in fltr.get(\"abiFilters\",[])] @property def topic(self): return self._topic async def test(self, event_log,", "OR INT REQUIRED type STRING REQUIRED op STRING REQUIRED } ] } \"\"\"", "= type_factory(fltr[\"rvalue\"]) self._type_factory = type_factory self._op = fltr[\"op\"] assert SafeOP.ops(self._op) def test(self,event): lvalue", "fltr.get(\"addressFilter\",None) self._addressFilter = AddressFilter(af) if af else None self._abiFilters = [ABIFilter(f) for f", "@staticmethod def gt(a,b): return a > b @staticmethod def lt(a,b): return not SafeOP.gt(a,b)", "self._addressFilter else True bntest = self._bnFilter.test(event_log.blockNumber) if self._bnFilter else True return all([abitest,addrtest,bntest]) @property", "__slots__=[\"_value\"] def __init__(self,value): self._value = value def 
test(self,rvalue): return self._value == rvalue class", "SafeOP.gt(a,b) or SafeOP.eq(a,b) @staticmethod def le(a,b): return SafeOP.lt(a,b) or SafeOP.eq(a,b) @staticmethod def ne(a,b):", "self._value == rvalue class BNFilter(object): __slots__=[\"_rvalue\",\"_op\"] def __init__(self,rvalue,op): self._rvalue = rvalue Types.checkUint256(self._rvalue) self._op", "= rvalue Types.checkUint256(self._rvalue) self._op = op assert SafeOP.ops(self._op) def test(self,event_log): test = getattr(SafeOP,self._op)(event_log.blockNumber,", "\"bnFilter\":<uint256> OPTIONAL, \"abiFilters\": OPTIONAL [ { name STRING REQUIRED rvalue HEXSTR OR INT", "self._abiFilters]) else: abitest = True addrtest = self._addressFilter.test(event_log.address) if self._addressFilter else True bntest", "return self._value == rvalue class BNFilter(object): __slots__=[\"_rvalue\",\"_op\"] def __init__(self,rvalue,op): self._rvalue = rvalue Types.checkUint256(self._rvalue)", "None self._abiFilters = [ABIFilter(f) for f in fltr.get(\"abiFilters\",[])] @property def topic(self): return self._topic", "a > b @staticmethod def lt(a,b): return not SafeOP.gt(a,b) @staticmethod def ge(a,b): return", "__init__(self,fltr,callback): self._callback = callback bf = fltr.get(\"bnFilter\",None) self._bnFilter = BNFilter(bf) if bf else", "ABIFilter(object): __slots__=[\"_rvalue\",\"_op\",\"_name\",\"_type\"] def __init__(self,fltr): self._name = fltr[\"name\"] type_factory = getattr(Types,fltr[\"type\"]) self._rvalue = type_factory(fltr[\"rvalue\"])", "name STRING REQUIRED rvalue HEXSTR OR INT REQUIRED type STRING REQUIRED op STRING", "@property def topic(self): return self._topic async def test(self, event_log, event): if self._abiFilters: abitest", "topic(self): return self._topic async def test(self, event_log, event): if self._abiFilters: abitest = all([filtr.test(event)", "self._op = op assert SafeOP.ops(self._op) def test(self,event_log): test = getattr(SafeOP,self._op)(event_log.blockNumber, self._rvalue) assert 
type(test)", "\"addressFilter\":<address> OPTIONAL, \"bnFilter\":<uint256> OPTIONAL, \"abiFilters\": OPTIONAL [ { name STRING REQUIRED rvalue HEXSTR", "return test class ABIFilter(object): __slots__=[\"_rvalue\",\"_op\",\"_name\",\"_type\"] def __init__(self,fltr): self._name = fltr[\"name\"] type_factory = getattr(Types,fltr[\"type\"])", "op in ['eq','gt','lt','ge','le','ne',] @staticmethod def eq(a,b): return a == b @staticmethod def gt(a,b):", "STRING REQUIRED op STRING REQUIRED } ] } \"\"\" #all and logic mapped", "if bf else None af = fltr.get(\"addressFilter\",None) self._addressFilter = AddressFilter(af) if af else", "= ['_ops'] @staticmethod def ops(op): return op in ['eq','gt','lt','ge','le','ne',] @staticmethod def eq(a,b): return", "callback bf = fltr.get(\"bnFilter\",None) self._bnFilter = BNFilter(bf) if bf else None af =", "[ABIFilter(f) for f in fltr.get(\"abiFilters\",[])] @property def topic(self): return self._topic async def test(self,", "def ge(a,b): return SafeOP.gt(a,b) or SafeOP.eq(a,b) @staticmethod def le(a,b): return SafeOP.lt(a,b) or SafeOP.eq(a,b)", "return a > b @staticmethod def lt(a,b): return not SafeOP.gt(a,b) @staticmethod def ge(a,b):", "def callback(self): return self._callback class AddressFilter(object): __slots__=[\"_value\"] def __init__(self,value): self._value = value def", "event_log, event): if self._abiFilters: abitest = all([filtr.test(event) for filtr in self._abiFilters]) else: abitest", "self._value = value def test(self,rvalue): return self._value == rvalue class BNFilter(object): __slots__=[\"_rvalue\",\"_op\"] def", "return self._topic async def test(self, event_log, event): if self._abiFilters: abitest = all([filtr.test(event) for", "self._op = fltr[\"op\"] assert SafeOP.ops(self._op) def test(self,event): lvalue = event[self._name] lvalue = self._type_factory(lvalue)", "REQUIRED rvalue HEXSTR OR INT REQUIRED type STRING REQUIRED op STRING REQUIRED }", "def lt(a,b): return not SafeOP.gt(a,b) @staticmethod def 
ge(a,b): return SafeOP.gt(a,b) or SafeOP.eq(a,b) @staticmethod", "None af = fltr.get(\"addressFilter\",None) self._addressFilter = AddressFilter(af) if af else None self._abiFilters =", "else True bntest = self._bnFilter.test(event_log.blockNumber) if self._bnFilter else True return all([abitest,addrtest,bntest]) @property def", "def __init__(self,value): self._value = value def test(self,rvalue): return self._value == rvalue class BNFilter(object):", "] } \"\"\" #all and logic mapped class EventFilter(object): __slots__=[\"_abiFilters\",\"_bnFilter\",\"_addressFilter\",\"_callback\"] def __init__(self,fltr,callback): self._callback", "else None self._abiFilters = [ABIFilter(f) for f in fltr.get(\"abiFilters\",[])] @property def topic(self): return", "= self._bnFilter.test(event_log.blockNumber) if self._bnFilter else True return all([abitest,addrtest,bntest]) @property def callback(self): return self._callback", "SafeOP.eq(a,b) @staticmethod def le(a,b): return SafeOP.lt(a,b) or SafeOP.eq(a,b) @staticmethod def ne(a,b): return not", "getattr(SafeOP,self._op)(event[self._name],self._rvalue) return result class SafeOP(object): slots = ['_ops'] @staticmethod def ops(op): return op", "and logic mapped class EventFilter(object): __slots__=[\"_abiFilters\",\"_bnFilter\",\"_addressFilter\",\"_callback\"] def __init__(self,fltr,callback): self._callback = callback bf =", "logging log = logging.getLogger(__name__) \"\"\" { \"addressFilter\":<address> OPTIONAL, \"bnFilter\":<uint256> OPTIONAL, \"abiFilters\": OPTIONAL [", "self._addressFilter = AddressFilter(af) if af else None self._abiFilters = [ABIFilter(f) for f in", "lt(a,b): return not SafeOP.gt(a,b) @staticmethod def ge(a,b): return SafeOP.gt(a,b) or SafeOP.eq(a,b) @staticmethod def", "for filtr in self._abiFilters]) else: abitest = True addrtest = self._addressFilter.test(event_log.address) if self._addressFilter", "} \"\"\" #all and logic mapped class EventFilter(object): 
__slots__=[\"_abiFilters\",\"_bnFilter\",\"_addressFilter\",\"_callback\"] def __init__(self,fltr,callback): self._callback =", "fltr.get(\"abiFilters\",[])] @property def topic(self): return self._topic async def test(self, event_log, event): if self._abiFilters:", "addrtest = self._addressFilter.test(event_log.address) if self._addressFilter else True bntest = self._bnFilter.test(event_log.blockNumber) if self._bnFilter else", "self._abiFilters = [ABIFilter(f) for f in fltr.get(\"abiFilters\",[])] @property def topic(self): return self._topic async", "bf else None af = fltr.get(\"addressFilter\",None) self._addressFilter = AddressFilter(af) if af else None", "[ { name STRING REQUIRED rvalue HEXSTR OR INT REQUIRED type STRING REQUIRED", "is bool return test class ABIFilter(object): __slots__=[\"_rvalue\",\"_op\",\"_name\",\"_type\"] def __init__(self,fltr): self._name = fltr[\"name\"] type_factory", "lvalue = self._type_factory(lvalue) result = getattr(SafeOP,self._op)(event[self._name],self._rvalue) return result class SafeOP(object): slots = ['_ops']", "__slots__=[\"_abiFilters\",\"_bnFilter\",\"_addressFilter\",\"_callback\"] def __init__(self,fltr,callback): self._callback = callback bf = fltr.get(\"bnFilter\",None) self._bnFilter = BNFilter(bf) if", "gt(a,b): return a > b @staticmethod def lt(a,b): return not SafeOP.gt(a,b) @staticmethod def", "{ \"addressFilter\":<address> OPTIONAL, \"bnFilter\":<uint256> OPTIONAL, \"abiFilters\": OPTIONAL [ { name STRING REQUIRED rvalue", "class ABIFilter(object): __slots__=[\"_rvalue\",\"_op\",\"_name\",\"_type\"] def __init__(self,fltr): self._name = fltr[\"name\"] type_factory = getattr(Types,fltr[\"type\"]) self._rvalue =", "def __init__(self,fltr,callback): self._callback = callback bf = fltr.get(\"bnFilter\",None) self._bnFilter = BNFilter(bf) if bf", "OPTIONAL [ { name STRING REQUIRED rvalue HEXSTR OR INT REQUIRED type STRING", "True bntest = self._bnFilter.test(event_log.blockNumber) if self._bnFilter else True 
return all([abitest,addrtest,bntest]) @property def callback(self):", "result class SafeOP(object): slots = ['_ops'] @staticmethod def ops(op): return op in ['eq','gt','lt','ge','le','ne',]", "return a == b @staticmethod def gt(a,b): return a > b @staticmethod def", "True addrtest = self._addressFilter.test(event_log.address) if self._addressFilter else True bntest = self._bnFilter.test(event_log.blockNumber) if self._bnFilter", "not SafeOP.gt(a,b) @staticmethod def ge(a,b): return SafeOP.gt(a,b) or SafeOP.eq(a,b) @staticmethod def le(a,b): return", "mapped class EventFilter(object): __slots__=[\"_abiFilters\",\"_bnFilter\",\"_addressFilter\",\"_callback\"] def __init__(self,fltr,callback): self._callback = callback bf = fltr.get(\"bnFilter\",None) self._bnFilter", "EventFilter(object): __slots__=[\"_abiFilters\",\"_bnFilter\",\"_addressFilter\",\"_callback\"] def __init__(self,fltr,callback): self._callback = callback bf = fltr.get(\"bnFilter\",None) self._bnFilter = BNFilter(bf)", "\"\"\" { \"addressFilter\":<address> OPTIONAL, \"bnFilter\":<uint256> OPTIONAL, \"abiFilters\": OPTIONAL [ { name STRING REQUIRED", "{ name STRING REQUIRED rvalue HEXSTR OR INT REQUIRED type STRING REQUIRED op", "type(test) is bool return test class ABIFilter(object): __slots__=[\"_rvalue\",\"_op\",\"_name\",\"_type\"] def __init__(self,fltr): self._name = fltr[\"name\"]", "@staticmethod def ge(a,b): return SafeOP.gt(a,b) or SafeOP.eq(a,b) @staticmethod def le(a,b): return SafeOP.lt(a,b) or", "in ['eq','gt','lt','ge','le','ne',] @staticmethod def eq(a,b): return a == b @staticmethod def gt(a,b): return", "else None af = fltr.get(\"addressFilter\",None) self._addressFilter = AddressFilter(af) if af else None self._abiFilters", "INT REQUIRED type STRING REQUIRED op STRING REQUIRED } ] } \"\"\" #all", "= callback bf = fltr.get(\"bnFilter\",None) self._bnFilter = BNFilter(bf) if bf else None af", "af = fltr.get(\"addressFilter\",None) self._addressFilter = AddressFilter(af) if af else None 
self._abiFilters = [ABIFilter(f)", "abitest = True addrtest = self._addressFilter.test(event_log.address) if self._addressFilter else True bntest = self._bnFilter.test(event_log.blockNumber)", "filtr in self._abiFilters]) else: abitest = True addrtest = self._addressFilter.test(event_log.address) if self._addressFilter else", "if self._addressFilter else True bntest = self._bnFilter.test(event_log.blockNumber) if self._bnFilter else True return all([abitest,addrtest,bntest])", "= self._type_factory(lvalue) result = getattr(SafeOP,self._op)(event[self._name],self._rvalue) return result class SafeOP(object): slots = ['_ops'] @staticmethod", "['eq','gt','lt','ge','le','ne',] @staticmethod def eq(a,b): return a == b @staticmethod def gt(a,b): return a", "self._callback = callback bf = fltr.get(\"bnFilter\",None) self._bnFilter = BNFilter(bf) if bf else None", "def gt(a,b): return a > b @staticmethod def lt(a,b): return not SafeOP.gt(a,b) @staticmethod", "ops(op): return op in ['eq','gt','lt','ge','le','ne',] @staticmethod def eq(a,b): return a == b @staticmethod", "self._type_factory(lvalue) result = getattr(SafeOP,self._op)(event[self._name],self._rvalue) return result class SafeOP(object): slots = ['_ops'] @staticmethod def", "test(self,event): lvalue = event[self._name] lvalue = self._type_factory(lvalue) result = getattr(SafeOP,self._op)(event[self._name],self._rvalue) return result class", "SafeOP(object): slots = ['_ops'] @staticmethod def ops(op): return op in ['eq','gt','lt','ge','le','ne',] @staticmethod def", "class BNFilter(object): __slots__=[\"_rvalue\",\"_op\"] def __init__(self,rvalue,op): self._rvalue = rvalue Types.checkUint256(self._rvalue) self._op = op assert", "HEXSTR OR INT REQUIRED type STRING REQUIRED op STRING REQUIRED } ] }", "> b @staticmethod def lt(a,b): return not SafeOP.gt(a,b) @staticmethod def ge(a,b): return SafeOP.gt(a,b)", "REQUIRED op STRING REQUIRED } ] } \"\"\" #all and logic mapped class", "self._bnFilter = BNFilter(bf) if bf 
else None af = fltr.get(\"addressFilter\",None) self._addressFilter = AddressFilter(af)", "@property def callback(self): return self._callback class AddressFilter(object): __slots__=[\"_value\"] def __init__(self,value): self._value = value", "op assert SafeOP.ops(self._op) def test(self,event_log): test = getattr(SafeOP,self._op)(event_log.blockNumber, self._rvalue) assert type(test) is bool", "OPTIONAL, \"bnFilter\":<uint256> OPTIONAL, \"abiFilters\": OPTIONAL [ { name STRING REQUIRED rvalue HEXSTR OR", "#all and logic mapped class EventFilter(object): __slots__=[\"_abiFilters\",\"_bnFilter\",\"_addressFilter\",\"_callback\"] def __init__(self,fltr,callback): self._callback = callback bf", "event[self._name] lvalue = self._type_factory(lvalue) result = getattr(SafeOP,self._op)(event[self._name],self._rvalue) return result class SafeOP(object): slots =", "= self._addressFilter.test(event_log.address) if self._addressFilter else True bntest = self._bnFilter.test(event_log.blockNumber) if self._bnFilter else True", "BNFilter(object): __slots__=[\"_rvalue\",\"_op\"] def __init__(self,rvalue,op): self._rvalue = rvalue Types.checkUint256(self._rvalue) self._op = op assert SafeOP.ops(self._op)", "fltr[\"op\"] assert SafeOP.ops(self._op) def test(self,event): lvalue = event[self._name] lvalue = self._type_factory(lvalue) result =", "OPTIONAL, \"abiFilters\": OPTIONAL [ { name STRING REQUIRED rvalue HEXSTR OR INT REQUIRED", "callback(self): return self._callback class AddressFilter(object): __slots__=[\"_value\"] def __init__(self,value): self._value = value def test(self,rvalue):", "Types.checkUint256(self._rvalue) self._op = op assert SafeOP.ops(self._op) def test(self,event_log): test = getattr(SafeOP,self._op)(event_log.blockNumber, self._rvalue) assert", "} ] } \"\"\" #all and logic mapped class EventFilter(object): __slots__=[\"_abiFilters\",\"_bnFilter\",\"_addressFilter\",\"_callback\"] def __init__(self,fltr,callback):", "self._abiFilters: abitest = 
all([filtr.test(event) for filtr in self._abiFilters]) else: abitest = True addrtest", "= all([filtr.test(event) for filtr in self._abiFilters]) else: abitest = True addrtest = self._addressFilter.test(event_log.address)", "all([abitest,addrtest,bntest]) @property def callback(self): return self._callback class AddressFilter(object): __slots__=[\"_value\"] def __init__(self,value): self._value =", "rvalue Types.checkUint256(self._rvalue) self._op = op assert SafeOP.ops(self._op) def test(self,event_log): test = getattr(SafeOP,self._op)(event_log.blockNumber, self._rvalue)", "= [ABIFilter(f) for f in fltr.get(\"abiFilters\",[])] @property def topic(self): return self._topic async def" ]
[ "missing months df_new = dd_for_site(stn, next_mo).query('coverage > @MIN_COVERAGE').copy() if len(df_new): # put this", "compression='bz2') If the file is located on a web server, you can read", "\"\"\" from os.path import dirname, join, realpath import sys from datetime import datetime,", "to the existing one df_new.reset_index(inplace=True) df_new.index = [stn] * len(df_new) df_new.index.name = 'station'", "if __name__ == '__main__': df_exist = pd.read_pickle(join(APP_PATH, 'data/degree_days.pkl'), compression='bz2') # list of new", "base 60 F degree-days. Temperature data used to calculate degree-days comes from the", "the most recent months not already present in the DataFrame. All stations found", "has the following format: month hdd60 hdd65 station PAED 2018-02-01 1257.648675 1397.648675 PAED", "is located on a web server, you can read it with the following", "actually have data. \"\"\" # get beginning of month st_dt_1 = start_date.replace(day=1, hour=0,", "index is the National Weather Service 4-letter station code. The 'month' column is", "that being the first day of the month. The columns of the DataFrame", "degree-day values: the first is base 60 degree F values and the second", "= [stn] * len(df_new) df_new.index.name = 'station' df_new.drop(columns=['coverage'], inplace=True) # add it to", "most recent months not already present in the DataFrame. All stations found in", "the DataFrame because it satisfies the MIN_COVERAGE check described below. 
This script assumes", "\"\"\"Returns a Pandas Dataframe of monthly heating degree-day values for 'stn' (a NWS", "is filled in with the average value for the rest of the hours", "in the month or a partial month may be prematurely added to the", "pd import requests # Minimum fraction of the hours in a month that", "run on the first day of the month so that the prior month's", "+ new_dfs) # get it sorted by station and month df_final.reset_index(inplace=True) df_final.sort_values(['station', 'month'],", "pd.read_pickle(join(APP_PATH, 'data/degree_days.pkl'), compression='bz2') # list of new DataFrames to add to the existing", "dfc['coverage'] = dfc.temp / dfc.total_hours # Now back to the main dataframe to", "hours that actually have data. \"\"\" # get beginning of month st_dt_1 =", "compression='bz2') Once you have a DataFrame, you can extract that portion of the", "site, https://bms.ahfc.us . This script is typically run from a Cron job that", "60 F degree-days. Temperature data used to calculate degree-days comes from the AHFC", "The 'month' column is a first-of-the-month date identifying the month whose degree-days are", "eventually add to the # degree-day DataFrame new_dfs.append(df_new) print('{} new months'.format(len(df_new))) else: print()", "script late in the month or a partial month may be prematurely added", "is typically run from a Cron job that schedules the script to run", "hdd60 hdd65 station PAED 2018-02-01 1257.648675 1397.648675 PAED 2018-03-01 1028.027773 1183.027773 The index", "i in dfc.index] # index is last day of the month dfc['coverage'] =", "days start in the month that 'start_date' (Python date/time object) falls in and", "MIN_COVERAGE check described below. 
This script assumes the pickled DataFrame already exists and", "import requests from io import BytesIO b = requests.get('http://ahfc.webfactional.com/data/degree_days.pkl').content d = pd.read_pickle(BytesIO(b), compression='bz2')", "degree-day information comes from the AHFC BMON site, https://bms.ahfc.us . This script is", "present in the DataFrame. All stations found in the index of the DataFrame", "# Convert index timestamps to beginning of the month mos = [datetime(d.year, d.month,", "df_new = dd_for_site(stn, next_mo).query('coverage > @MIN_COVERAGE').copy() if len(df_new): # put this DataFrame in", "for x in df.temp] df['hdd65'] = [(65.0 - x)/24.0 if x<65.0 else 0.0", "list of new DataFrames to eventually add to the # degree-day DataFrame new_dfs.append(df_new)", "with the average value for the rest of the hours that do have", "@MIN_COVERAGE').copy() if len(df_new): # put this DataFrame in a form that can be", "months: df_one_site = df.query(\"station == 'PAMR' and month >= '2018-01-01'\") \"\"\" from os.path", "shown. 'hdd60' and 'hdd65' are the heating degree-day values: the first is base", "the second is base 65 deg F values. This script will acquire temperature", "= pd.read_pickle('degree_days.pkl', compression='bz2') If the file is located on a web server, you", "(a NWS weather site code). Degree days start in the month that 'start_date'", "new DataFrames to eventually add to the # degree-day DataFrame new_dfs.append(df_new) print('{} new", "Save the DataFrame as a compressed pickle and a CSV file. df_final.to_pickle(join(APP_PATH, 'data/degree_days.pkl'),", "month's degree days will be available. Dont' run the script late in the", "path 'data/degree_days.pkl' (compression = 'bz2'). It also saves the DataFrame as a CSV", "first-of-the-month date identifying the month whose degree-days are shown. 
'hdd60' and 'hdd65' are", "'PAMR' and month >= '2018-01-01'\") \"\"\" from os.path import dirname, join, realpath import", "# get a date in the following month next_mo = last_mo + timedelta(days=32)", "in a month that must have data in order # to include the", "MIN_COVERAGE = 0.7 print('\\nScript Start: {}'.format(datetime.now().ctime())) # path to this directory APP_PATH =", "#!/usr/local/bin/python3.6 \"\"\"Script that adds monthly heating degree day values to a pickled Pandas", "order to calculate the degree-days for the most recent months not already present", "last_mo + timedelta(days=32) # could be a DST change in there; add 32", "dataframe to calc degree-days df['hdd60'] = [(60.0 - x)/24.0 if x<60.0 else 0.0", "the DataFrame as a CSV file at 'data/degree_days.csv'. The new degree-day information comes", "degree F values and the second is base 65 deg F values. This", "assumed to not deviate from the average of the data present. The column", "df = pd.DataFrame(resp['data']['readings'], columns=['ts', 'temp']) df.set_index('ts', inplace=True) df.index = pd.to_datetime(df.index) # calculate the", "so that the prior month's degree days will be available. Dont' run the", "to not deviate from the average of the data present. The column 'coverage'", "NOTES ON UTILIZING THE DATA To read this DataFrame back into a Python", "axis=1, inplace=True) dfm = df.resample('1M').mean() dfm['coverage'] = dfc.coverage dfm['hdd60'] = dfm.hdd60 * dfc.total_hours", "F values and the second is base 65 deg F values. This script", "adds monthly heating degree day values to a pickled Pandas DataFrame with the", "do have data. ----------------------------------- NOTES ON UTILIZING THE DATA To read this DataFrame", "the month so that the prior month's degree days will be available. Dont'", "order # to include the month. MIN_COVERAGE = 0.7 print('\\nScript Start: {}'.format(datetime.now().ctime())) #", "for the most recent months not already present in the DataFrame. 
All stations", "in the following month next_mo = last_mo + timedelta(days=32) # could be a", "can extract that portion of the DataFrame that applies to one site by:", "script assumes the pickled DataFrame already exists and has the following format: month", "if len(df_new): # put this DataFrame in a form that can be concatenated", "data in order # to include the month. MIN_COVERAGE = 0.7 print('\\nScript Start:", "the first is base 60 degree F values and the second is base", "to a pickled Pandas DataFrame with the path 'data/degree_days.pkl' (compression = 'bz2'). It", "add to the existing one new_dfs = [] for stn in df_exist.index.unique(): print('Processing", "new DataFrames to add to the existing one new_dfs = [] for stn", "import sys from datetime import datetime, timedelta import pandas as pd import requests", "include the month. MIN_COVERAGE = 0.7 print('\\nScript Start: {}'.format(datetime.now().ctime())) # path to this", "timedelta import pandas as pd import requests # Minimum fraction of the hours", "get a date in the following month next_mo = last_mo + timedelta(days=32) #", "and month >= '2018-01-01'\") \"\"\" from os.path import dirname, join, realpath import sys", "DataFrame that applies to one site by: df_one_site = df.loc['PAMR'] or df_one_site =", "month next_mo = last_mo + timedelta(days=32) # could be a DST change in", "date/time object) falls in and continue through the end of available data. In", "the index of the DataFrame will be updated. The script assumes that the", "= [(60.0 - x)/24.0 if x<60.0 else 0.0 for x in df.temp] df['hdd65']", "get beginning of month st_dt_1 = start_date.replace(day=1, hour=0, minute=0, second=0, microsecond=0) params =", "code with '_temp' appended, e.g. 'PAMR_temp'. The MIN_COVERAGE constant in the script controls", "it satisfies the MIN_COVERAGE check described below. This script assumes the pickled DataFrame", "value for the rest of the hours that do have data. 
----------------------------------- NOTES", "= df.query(\"station == 'PAMR' and month >= '2018-01-01'\") \"\"\" from os.path import dirname,", "65 F degree-days and base 60 F degree-days. Temperature data used to calculate", "= [datetime(d.year, d.month, 1) for d in dfm.index] dfm.index = mos dfm.index.name =", "NWS weather site code). Degree days start in the month that 'start_date' (Python", "Degree days start in the month that 'start_date' (Python date/time object) falls in", "calculate the degree-days for the most recent months not already present in the", "the data present. The column 'coverage' indicates the fraction of the months hours", "[(65.0 - x)/24.0 if x<65.0 else 0.0 for x in df.temp] df.drop(['temp'], axis=1,", "'data/degree_days.pkl' (compression = 'bz2'). It also saves the DataFrame as a CSV file", ">= '2018-01-01'\") \"\"\" from os.path import dirname, join, realpath import sys from datetime", "to calc degree-days df['hdd60'] = [(60.0 - x)/24.0 if x<60.0 else 0.0 for", "one new_dfs = [] for stn in df_exist.index.unique(): print('Processing {}: '.format(stn), end='') try:", "PAED 2018-03-01 1028.027773 1183.027773 The index is the National Weather Service 4-letter station", "= 'bz2'). It also saves the DataFrame as a CSV file at 'data/degree_days.csv'.", "print('\\nScript Start: {}'.format(datetime.now().ctime())) # path to this directory APP_PATH = dirname(realpath(__file__)) # URL", "d in dfm.index] dfm.index = mos dfm.index.name = 'month' else: raise ValueError(str(resp['data'])) return", "must have before being included. Missing data is filled in with the average", "fraction of the months hours that actually have data. 
\"\"\" # get beginning", "\"\"\" # get beginning of month st_dt_1 = start_date.replace(day=1, hour=0, minute=0, second=0, microsecond=0)", "[datetime(d.year, d.month, 1) for d in dfm.index] dfm.index = mos dfm.index.name = 'month'", "URL to the AHFC BMON site API BMON_URL = 'https://bms.ahfc.us/api/v1/readings/{}/' def dd_for_site(stn, start_date):", "degree days for missing months df_new = dd_for_site(stn, next_mo).query('coverage > @MIN_COVERAGE').copy() if len(df_new):", "will acquire temperature data from the AHFC BMON site in order to calculate", "the month. MIN_COVERAGE = 0.7 print('\\nScript Start: {}'.format(datetime.now().ctime())) # path to this directory", "saves the DataFrame as a CSV file at 'data/degree_days.csv'. The new degree-day information", "the AHFC BMON site in order to calculate the degree-days for the most", "the minimum amount of data coverage a month must have before being included.", "of the months: df_one_site = df.query(\"station == 'PAMR' and month >= '2018-01-01'\") \"\"\"", "4-letter station code. The 'month' column is a first-of-the-month date identifying the month", "one site with a subset of the months: df_one_site = df.query(\"station == 'PAMR'", "month >= '2018-01-01'\") \"\"\" from os.path import dirname, join, realpath import sys from", "import pandas as pd df = pd.read_pickle('degree_days.pkl', compression='bz2') If the file is located", "available on a local drive: import pandas as pd df = pd.read_pickle('degree_days.pkl', compression='bz2')", "is available on a local drive: import pandas as pd df = pd.read_pickle('degree_days.pkl',", "for x in df.temp] df.drop(['temp'], axis=1, inplace=True) dfm = df.resample('1M').mean() dfm['coverage'] = dfc.coverage", "month mos = [datetime(d.year, d.month, 1) for d in dfm.index] dfm.index = mos", "from the AHFC BMON site, https://bms.ahfc.us . 
This script is typically run from", "index timestamps to beginning of the month mos = [datetime(d.year, d.month, 1) for", "index has a timestamp for each month returned, that being the first day", "the months: df_one_site = df.query(\"station == 'PAMR' and month >= '2018-01-01'\") \"\"\" from", "<reponame>alanmitchell/update-degree-days<gh_stars>0 #!/usr/local/bin/python3.6 \"\"\"Script that adds monthly heating degree day values to a pickled", "as pd df = pd.read_pickle('degree_days.pkl', compression='bz2') If the file is located on a", "average of the data present. The column 'coverage' indicates the fraction of the", "data dfc = df.resample('1M').count() dfc['total_hours'] = [i.day * 24 for i in dfc.index]", "add to the # degree-day DataFrame new_dfs.append(df_new) print('{} new months'.format(len(df_new))) else: print() except:", "{ 'start_ts': st_dt_1.strftime('%Y-%m-%d'), 'averaging': '1H' } sensor_id = '{}_temp'.format(stn) resp = requests.get(BMON_URL.format(sensor_id), params=params).json()", "= start_date.replace(day=1, hour=0, minute=0, second=0, microsecond=0) params = { 'start_ts': st_dt_1.strftime('%Y-%m-%d'), 'averaging': '1H'", "late in the month or a partial month may be prematurely added to", "degree-days. Temperature data used to calculate degree-days comes from the AHFC BMON site.", "list of new DataFrames to add to the existing one new_dfs = []", "DataFrames to add to the existing one new_dfs = [] for stn in", "each month returned, that being the first day of the month. The columns", "io import BytesIO b = requests.get('http://ahfc.webfactional.com/data/degree_days.pkl').content d = pd.read_pickle(BytesIO(b), compression='bz2') Once you have", "run from a Cron job that schedules the script to run on the", "df.drop(['temp'], axis=1, inplace=True) dfm = df.resample('1M').mean() dfm['coverage'] = dfc.coverage dfm['hdd60'] = dfm.hdd60 *", "deg F values. 
This script will acquire temperature data from the AHFC BMON", "= pd.DataFrame(resp['data']['readings'], columns=['ts', 'temp']) df.set_index('ts', inplace=True) df.index = pd.to_datetime(df.index) # calculate the percentage", "'month' else: raise ValueError(str(resp['data'])) return dfm if __name__ == '__main__': df_exist = pd.read_pickle(join(APP_PATH,", "be a DST change in there; add 32 days to be safe #", "this DataFrame in a form that can be concatenated to the existing one", "and the second is base 65 deg F values. This script will acquire", "minimum amount of data coverage a month must have before being included. Missing", "designate base 65 F degree-days and base 60 F degree-days. Temperature data used", "in with the average value for the rest of the hours that do", "pickled Pandas DataFrame with the path 'data/degree_days.pkl' (compression = 'bz2'). It also saves", "this DataFrame back into a Python script, you can excecute the following if", "back into a Python script, you can excecute the following if the DataFrame", "DataFrame are \"hdd65\" and \"hdd60\" to designate base 65 F degree-days and base", "a date in the following month next_mo = last_mo + timedelta(days=32) # could", "in a form that can be concatenated to the existing one df_new.reset_index(inplace=True) df_new.index", "technique) To extract one site with a subset of the months: df_one_site =", "you can extract that portion of the DataFrame that applies to one site", "start_date.replace(day=1, hour=0, minute=0, second=0, microsecond=0) params = { 'start_ts': st_dt_1.strftime('%Y-%m-%d'), 'averaging': '1H' }", "import pandas as pd import requests from io import BytesIO b = requests.get('http://ahfc.webfactional.com/data/degree_days.pkl').content", "ON UTILIZING THE DATA To read this DataFrame back into a Python script,", "dd_for_site(stn, next_mo).query('coverage > @MIN_COVERAGE').copy() if len(df_new): # put this DataFrame in a form", "this station last_mo = df_exist.loc[stn].month.max() # 
get a date in the following month", "Dont' run the script late in the month or a partial month may", "- x)/24.0 if x<65.0 else 0.0 for x in df.temp] df.drop(['temp'], axis=1, inplace=True)", ". This script is typically run from a Cron job that schedules the", "raise ValueError(str(resp['data'])) return dfm if __name__ == '__main__': df_exist = pd.read_pickle(join(APP_PATH, 'data/degree_days.pkl'), compression='bz2')", "values for 'stn' (a NWS weather site code). Degree days start in the", "[stn] * len(df_new) df_new.index.name = 'station' df_new.drop(columns=['coverage'], inplace=True) # add it to the", "added to the DataFrame because it satisfies the MIN_COVERAGE check described below. This", "calculate degree-days comes from the AHFC BMON site. Missing hours are assumed to", "# put this DataFrame in a form that can be concatenated to the", "the following if the DataFrame is available on a local drive: import pandas", "for the rest of the hours that do have data. ----------------------------------- NOTES ON", "[] for stn in df_exist.index.unique(): print('Processing {}: '.format(stn), end='') try: # get last", "end of available data. In the returned DataFrame, the index has a timestamp", "2018-02-01 1257.648675 1397.648675 PAED 2018-03-01 1028.027773 1183.027773 The index is the National Weather", "'.format(stn), end='') try: # get last month present for this station last_mo =", "the BMON sensor ID for a weather station's temperature data is the 4-letter", "in the index of the DataFrame will be updated. The script assumes that", "months hours that actually have data. \"\"\" # get beginning of month st_dt_1", "4-letter station code with '_temp' appended, e.g. 'PAMR_temp'. 
The MIN_COVERAGE constant in the", "DataFrame, the index has a timestamp for each month returned, that being the", "= df_exist.loc[stn].month.max() # get a date in the following month next_mo = last_mo", "1) for d in dfm.index] dfm.index = mos dfm.index.name = 'month' else: raise", "be prematurely added to the DataFrame because it satisfies the MIN_COVERAGE check described", "from os.path import dirname, join, realpath import sys from datetime import datetime, timedelta", "PAED 2018-02-01 1257.648675 1397.648675 PAED 2018-03-01 1028.027773 1183.027773 The index is the National", "DataFrame with the path 'data/degree_days.pkl' (compression = 'bz2'). It also saves the DataFrame", "data with the new. df_final = pd.concat([df_exist] + new_dfs) # get it sorted", "for 'stn' (a NWS weather site code). Degree days start in the month", "month that 'start_date' (Python date/time object) falls in and continue through the end", "df['hdd65'] = [(65.0 - x)/24.0 if x<65.0 else 0.0 for x in df.temp]", "comes from the AHFC BMON site. Missing hours are assumed to not deviate", "by: df_one_site = df.loc['PAMR'] or df_one_site = df.query(\"station == 'PAMR'\") (slower than above", "= pd.read_pickle(BytesIO(b), compression='bz2') Once you have a DataFrame, you can extract that portion", "exists and has the following format: month hdd60 hdd65 station PAED 2018-02-01 1257.648675", "file at 'data/degree_days.csv'. The new degree-day information comes from the AHFC BMON site,", "code: import pandas as pd import requests from io import BytesIO b =", "the month. 
The columns of the DataFrame are \"hdd65\" and \"hdd60\" to designate", "= dfc.temp / dfc.total_hours # Now back to the main dataframe to calc", "print() except: print('{}: {}'.format(*sys.exc_info()[:2])) # Create a new DataFrame that combines the existing", "are the heating degree-day values: the first is base 60 degree F values", "month that has data dfc = df.resample('1M').count() dfc['total_hours'] = [i.day * 24 for", "also saves the DataFrame as a CSV file at 'data/degree_days.csv'. The new degree-day", "join, realpath import sys from datetime import datetime, timedelta import pandas as pd", "can be concatenated to the existing one df_new.reset_index(inplace=True) df_new.index = [stn] * len(df_new)", "identifying the month whose degree-days are shown. 'hdd60' and 'hdd65' are the heating", "data is filled in with the average value for the rest of the", "df.resample('1M').mean() dfm['coverage'] = dfc.coverage dfm['hdd60'] = dfm.hdd60 * dfc.total_hours dfm['hdd65'] = dfm.hdd65 *", "* 24 for i in dfc.index] # index is last day of the", "BMON site in order to calculate the degree-days for the most recent months", "# calculate the percentage of each month that has data dfc = df.resample('1M').count()", "index is last day of the month dfc['coverage'] = dfc.temp / dfc.total_hours #", "dfm['coverage'] = dfc.coverage dfm['hdd60'] = dfm.hdd60 * dfc.total_hours dfm['hdd65'] = dfm.hdd65 * dfc.total_hours", "of the DataFrame that applies to one site by: df_one_site = df.loc['PAMR'] or", "partial month may be prematurely added to the DataFrame because it satisfies the", "df.query(\"station == 'PAMR' and month >= '2018-01-01'\") \"\"\" from os.path import dirname, join,", "import pandas as pd import requests # Minimum fraction of the hours in", "All stations found in the index of the DataFrame will be updated. The", "AHFC BMON site, https://bms.ahfc.us . 
This script is typically run from a Cron", "to run on the first day of the month so that the prior", "station PAED 2018-02-01 1257.648675 1397.648675 PAED 2018-03-01 1028.027773 1183.027773 The index is the", "script, you can excecute the following if the DataFrame is available on a", "data. ----------------------------------- NOTES ON UTILIZING THE DATA To read this DataFrame back into", "that the BMON sensor ID for a weather station's temperature data is the", "st_dt_1.strftime('%Y-%m-%d'), 'averaging': '1H' } sensor_id = '{}_temp'.format(stn) resp = requests.get(BMON_URL.format(sensor_id), params=params).json() if resp['status']=='success':", "from a Cron job that schedules the script to run on the first", "minute=0, second=0, microsecond=0) params = { 'start_ts': st_dt_1.strftime('%Y-%m-%d'), 'averaging': '1H' } sensor_id =", "station and month df_final.reset_index(inplace=True) df_final.sort_values(['station', 'month'], inplace=True) df_final.set_index('station', inplace=True) # Save the DataFrame", "Temperature data used to calculate degree-days comes from the AHFC BMON site. Missing", "'PAMR'\") (slower than above technique) To extract one site with a subset of", "# index is last day of the month dfc['coverage'] = dfc.temp / dfc.total_hours", "F values. This script will acquire temperature data from the AHFC BMON site", "safe # get degree days for missing months df_new = dd_for_site(stn, next_mo).query('coverage >", "a compressed pickle and a CSV file. 
df_final.to_pickle(join(APP_PATH, 'data/degree_days.pkl'), compression='bz2', protocol=4) df_final.to_csv(join(APP_PATH, 'data/degree_days.csv'))", "API BMON_URL = 'https://bms.ahfc.us/api/v1/readings/{}/' def dd_for_site(stn, start_date): \"\"\"Returns a Pandas Dataframe of monthly", "} sensor_id = '{}_temp'.format(stn) resp = requests.get(BMON_URL.format(sensor_id), params=params).json() if resp['status']=='success': df = pd.DataFrame(resp['data']['readings'],", "x)/24.0 if x<60.0 else 0.0 for x in df.temp] df['hdd65'] = [(65.0 -", "month may be prematurely added to the DataFrame because it satisfies the MIN_COVERAGE", "heating degree-day values: the first is base 60 degree F values and the", "requests.get('http://ahfc.webfactional.com/data/degree_days.pkl').content d = pd.read_pickle(BytesIO(b), compression='bz2') Once you have a DataFrame, you can extract", "pd.DataFrame(resp['data']['readings'], columns=['ts', 'temp']) df.set_index('ts', inplace=True) df.index = pd.to_datetime(df.index) # calculate the percentage of", "following month next_mo = last_mo + timedelta(days=32) # could be a DST change", "temperature data from the AHFC BMON site in order to calculate the degree-days", "script assumes that the BMON sensor ID for a weather station's temperature data", "have a DataFrame, you can extract that portion of the DataFrame that applies", "month st_dt_1 = start_date.replace(day=1, hour=0, minute=0, second=0, microsecond=0) params = { 'start_ts': st_dt_1.strftime('%Y-%m-%d'),", "to the # degree-day DataFrame new_dfs.append(df_new) print('{} new months'.format(len(df_new))) else: print() except: print('{}:", "'start_ts': st_dt_1.strftime('%Y-%m-%d'), 'averaging': '1H' } sensor_id = '{}_temp'.format(stn) resp = requests.get(BMON_URL.format(sensor_id), params=params).json() if", "and 'hdd65' are the heating degree-day values: the first is base 60 degree", "print('{}: {}'.format(*sys.exc_info()[:2])) # Create a new DataFrame that combines the existing data with", 
"'temp']) df.set_index('ts', inplace=True) df.index = pd.to_datetime(df.index) # calculate the percentage of each month", "of the month dfc['coverage'] = dfc.temp / dfc.total_hours # Now back to the", "* dfc.total_hours # Convert index timestamps to beginning of the month mos =", "(compression = 'bz2'). It also saves the DataFrame as a CSV file at", "pandas as pd import requests from io import BytesIO b = requests.get('http://ahfc.webfactional.com/data/degree_days.pkl').content d", "df_one_site = df.query(\"station == 'PAMR' and month >= '2018-01-01'\") \"\"\" from os.path import", "site. Missing hours are assumed to not deviate from the average of the", "rest of the hours that do have data. ----------------------------------- NOTES ON UTILIZING THE", "len(df_new): # put this DataFrame in a form that can be concatenated to", "else: print() except: print('{}: {}'.format(*sys.exc_info()[:2])) # Create a new DataFrame that combines the", "the script to run on the first day of the month so that", "dfm['hdd60'] = dfm.hdd60 * dfc.total_hours dfm['hdd65'] = dfm.hdd65 * dfc.total_hours # Convert index", "new_dfs.append(df_new) print('{} new months'.format(len(df_new))) else: print() except: print('{}: {}'.format(*sys.exc_info()[:2])) # Create a new", "Pandas DataFrame with the path 'data/degree_days.pkl' (compression = 'bz2'). It also saves the", "month or a partial month may be prematurely added to the DataFrame because", "DataFrame because it satisfies the MIN_COVERAGE check described below. This script assumes the", "import datetime, timedelta import pandas as pd import requests # Minimum fraction of", "prior month's degree days will be available. 
Dont' run the script late in", "* dfc.total_hours dfm['hdd65'] = dfm.hdd65 * dfc.total_hours # Convert index timestamps to beginning", "# get last month present for this station last_mo = df_exist.loc[stn].month.max() # get", "fraction of the hours in a month that must have data in order", "= pd.read_pickle(join(APP_PATH, 'data/degree_days.pkl'), compression='bz2') # list of new DataFrames to add to the", "df.temp] df.drop(['temp'], axis=1, inplace=True) dfm = df.resample('1M').mean() dfm['coverage'] = dfc.coverage dfm['hdd60'] = dfm.hdd60", "dfm = df.resample('1M').mean() dfm['coverage'] = dfc.coverage dfm['hdd60'] = dfm.hdd60 * dfc.total_hours dfm['hdd65'] =", "days for missing months df_new = dd_for_site(stn, next_mo).query('coverage > @MIN_COVERAGE').copy() if len(df_new): #", "script controls the minimum amount of data coverage a month must have before", "of the month. The columns of the DataFrame are \"hdd65\" and \"hdd60\" to", "job that schedules the script to run on the first day of the", "params=params).json() if resp['status']=='success': df = pd.DataFrame(resp['data']['readings'], columns=['ts', 'temp']) df.set_index('ts', inplace=True) df.index = pd.to_datetime(df.index)", "can read it with the following code: import pandas as pd import requests", "site with a subset of the months: df_one_site = df.query(\"station == 'PAMR' and", "the first day of the month. The columns of the DataFrame are \"hdd65\"", "the main dataframe to calc degree-days df['hdd60'] = [(60.0 - x)/24.0 if x<60.0", "DataFrame as a CSV file at 'data/degree_days.csv'. The new degree-day information comes from", "'PAMR_temp'. 
The MIN_COVERAGE constant in the script controls the minimum amount of data", "dd_for_site(stn, start_date): \"\"\"Returns a Pandas Dataframe of monthly heating degree-day values for 'stn'", "read this DataFrame back into a Python script, you can excecute the following", "a subset of the months: df_one_site = df.query(\"station == 'PAMR' and month >=", "+ timedelta(days=32) # could be a DST change in there; add 32 days", "for this station last_mo = df_exist.loc[stn].month.max() # get a date in the following", "import BytesIO b = requests.get('http://ahfc.webfactional.com/data/degree_days.pkl').content d = pd.read_pickle(BytesIO(b), compression='bz2') Once you have a", "degree-days and base 60 F degree-days. Temperature data used to calculate degree-days comes", "to be safe # get degree days for missing months df_new = dd_for_site(stn,", "of data coverage a month must have before being included. Missing data is", "= '{}_temp'.format(stn) resp = requests.get(BMON_URL.format(sensor_id), params=params).json() if resp['status']=='success': df = pd.DataFrame(resp['data']['readings'], columns=['ts', 'temp'])", "for missing months df_new = dd_for_site(stn, next_mo).query('coverage > @MIN_COVERAGE').copy() if len(df_new): # put", "site API BMON_URL = 'https://bms.ahfc.us/api/v1/readings/{}/' def dd_for_site(stn, start_date): \"\"\"Returns a Pandas Dataframe of", "calculate the percentage of each month that has data dfc = df.resample('1M').count() dfc['total_hours']", "prematurely added to the DataFrame because it satisfies the MIN_COVERAGE check described below.", "'averaging': '1H' } sensor_id = '{}_temp'.format(stn) resp = requests.get(BMON_URL.format(sensor_id), params=params).json() if resp['status']=='success': df", "values. This script will acquire temperature data from the AHFC BMON site in", "base 65 F degree-days and base 60 F degree-days. Temperature data used to", "returned, that being the first day of the month. 
The columns of the", "in there; add 32 days to be safe # get degree days for", "through the end of available data. In the returned DataFrame, the index has", "CSV file at 'data/degree_days.csv'. The new degree-day information comes from the AHFC BMON", "Missing data is filled in with the average value for the rest of", "that schedules the script to run on the first day of the month", "hours in a month that must have data in order # to include", "be concatenated to the existing one df_new.reset_index(inplace=True) df_new.index = [stn] * len(df_new) df_new.index.name", "pickled DataFrame already exists and has the following format: month hdd60 hdd65 station", "it with the following code: import pandas as pd import requests from io", "station code with '_temp' appended, e.g. 'PAMR_temp'. The MIN_COVERAGE constant in the script", "DataFrame that combines the existing data with the new. df_final = pd.concat([df_exist] +", "requests from io import BytesIO b = requests.get('http://ahfc.webfactional.com/data/degree_days.pkl').content d = pd.read_pickle(BytesIO(b), compression='bz2') Once", "returned DataFrame, the index has a timestamp for each month returned, that being", "the returned DataFrame, the index has a timestamp for each month returned, that", "the month dfc['coverage'] = dfc.temp / dfc.total_hours # Now back to the main", "else 0.0 for x in df.temp] df['hdd65'] = [(65.0 - x)/24.0 if x<65.0", "DST change in there; add 32 days to be safe # get degree", "dfc.total_hours # Convert index timestamps to beginning of the month mos = [datetime(d.year,", "to the list of new DataFrames to eventually add to the # degree-day", "BMON sensor ID for a weather station's temperature data is the 4-letter station", "the end of available data. In the returned DataFrame, the index has a", "DataFrame back into a Python script, you can excecute the following if the", "of the hours that do have data. 
----------------------------------- NOTES ON UTILIZING THE DATA", "This script is typically run from a Cron job that schedules the script", "{}'.format(datetime.now().ctime())) # path to this directory APP_PATH = dirname(realpath(__file__)) # URL to the", "\"hdd65\" and \"hdd60\" to designate base 65 F degree-days and base 60 F", "degree day values to a pickled Pandas DataFrame with the path 'data/degree_days.pkl' (compression", "timedelta(days=32) # could be a DST change in there; add 32 days to", "directory APP_PATH = dirname(realpath(__file__)) # URL to the AHFC BMON site API BMON_URL", "pandas as pd import requests # Minimum fraction of the hours in a", "= 'https://bms.ahfc.us/api/v1/readings/{}/' def dd_for_site(stn, start_date): \"\"\"Returns a Pandas Dataframe of monthly heating degree-day", "last day of the month dfc['coverage'] = dfc.temp / dfc.total_hours # Now back", "day of the month dfc['coverage'] = dfc.temp / dfc.total_hours # Now back to", "x<65.0 else 0.0 for x in df.temp] df.drop(['temp'], axis=1, inplace=True) dfm = df.resample('1M').mean()", "the prior month's degree days will be available. Dont' run the script late", "to calculate degree-days comes from the AHFC BMON site. Missing hours are assumed", "months not already present in the DataFrame. All stations found in the index", "first day of the month. The columns of the DataFrame are \"hdd65\" and", "the list of new DataFrames to eventually add to the # degree-day DataFrame", "as a CSV file at 'data/degree_days.csv'. 
The new degree-day information comes from the", "df_one_site = df.query(\"station == 'PAMR'\") (slower than above technique) To extract one site", "applies to one site by: df_one_site = df.loc['PAMR'] or df_one_site = df.query(\"station ==", "resp = requests.get(BMON_URL.format(sensor_id), params=params).json() if resp['status']=='success': df = pd.DataFrame(resp['data']['readings'], columns=['ts', 'temp']) df.set_index('ts', inplace=True)", "the MIN_COVERAGE check described below. This script assumes the pickled DataFrame already exists", "on a local drive: import pandas as pd df = pd.read_pickle('degree_days.pkl', compression='bz2') If", "# list of new DataFrames to add to the existing one new_dfs =", "the DataFrame as a compressed pickle and a CSV file. df_final.to_pickle(join(APP_PATH, 'data/degree_days.pkl'), compression='bz2',", "first day of the month so that the prior month's degree days will", "that has data dfc = df.resample('1M').count() dfc['total_hours'] = [i.day * 24 for i", "the average of the data present. The column 'coverage' indicates the fraction of", "inplace=True) dfm = df.resample('1M').mean() dfm['coverage'] = dfc.coverage dfm['hdd60'] = dfm.hdd60 * dfc.total_hours dfm['hdd65']", "try: # get last month present for this station last_mo = df_exist.loc[stn].month.max() #", "monthly heating degree day values to a pickled Pandas DataFrame with the path", "with the path 'data/degree_days.pkl' (compression = 'bz2'). It also saves the DataFrame as", "month df_final.reset_index(inplace=True) df_final.sort_values(['station', 'month'], inplace=True) df_final.set_index('station', inplace=True) # Save the DataFrame as a", "hour=0, minute=0, second=0, microsecond=0) params = { 'start_ts': st_dt_1.strftime('%Y-%m-%d'), 'averaging': '1H' } sensor_id", "inplace=True) df.index = pd.to_datetime(df.index) # calculate the percentage of each month that has", "that must have data in order # to include the month. 
MIN_COVERAGE =", "x)/24.0 if x<65.0 else 0.0 for x in df.temp] df.drop(['temp'], axis=1, inplace=True) dfm", "stn in df_exist.index.unique(): print('Processing {}: '.format(stn), end='') try: # get last month present", "it to the list of new DataFrames to eventually add to the #", "0.0 for x in df.temp] df.drop(['temp'], axis=1, inplace=True) dfm = df.resample('1M').mean() dfm['coverage'] =", "as a compressed pickle and a CSV file. df_final.to_pickle(join(APP_PATH, 'data/degree_days.pkl'), compression='bz2', protocol=4) df_final.to_csv(join(APP_PATH,", "- x)/24.0 if x<60.0 else 0.0 for x in df.temp] df['hdd65'] = [(65.0", "acquire temperature data from the AHFC BMON site in order to calculate the", "have before being included. Missing data is filled in with the average value", "extract that portion of the DataFrame that applies to one site by: df_one_site", "= dirname(realpath(__file__)) # URL to the AHFC BMON site API BMON_URL = 'https://bms.ahfc.us/api/v1/readings/{}/'", "for i in dfc.index] # index is last day of the month dfc['coverage']", "DataFrame is available on a local drive: import pandas as pd df =", "is base 60 degree F values and the second is base 65 deg", "concatenated to the existing one df_new.reset_index(inplace=True) df_new.index = [stn] * len(df_new) df_new.index.name =", "local drive: import pandas as pd df = pd.read_pickle('degree_days.pkl', compression='bz2') If the file", "df_new.reset_index(inplace=True) df_new.index = [stn] * len(df_new) df_new.index.name = 'station' df_new.drop(columns=['coverage'], inplace=True) # add", "Minimum fraction of the hours in a month that must have data in", "The script assumes that the BMON sensor ID for a weather station's temperature", "the hours that do have data. ----------------------------------- NOTES ON UTILIZING THE DATA To", "= df.loc['PAMR'] or df_one_site = df.query(\"station == 'PAMR'\") (slower than above technique) To", "AHFC BMON site. 
Missing hours are assumed to not deviate from the average", "60 degree F values and the second is base 65 deg F values.", "The MIN_COVERAGE constant in the script controls the minimum amount of data coverage", "# could be a DST change in there; add 32 days to be", "24 for i in dfc.index] # index is last day of the month", "datetime import datetime, timedelta import pandas as pd import requests # Minimum fraction", "in order # to include the month. MIN_COVERAGE = 0.7 print('\\nScript Start: {}'.format(datetime.now().ctime()))", "degree-days comes from the AHFC BMON site. Missing hours are assumed to not", "the following code: import pandas as pd import requests from io import BytesIO", "data. \"\"\" # get beginning of month st_dt_1 = start_date.replace(day=1, hour=0, minute=0, second=0,", "'https://bms.ahfc.us/api/v1/readings/{}/' def dd_for_site(stn, start_date): \"\"\"Returns a Pandas Dataframe of monthly heating degree-day values", "months'.format(len(df_new))) else: print() except: print('{}: {}'.format(*sys.exc_info()[:2])) # Create a new DataFrame that combines", "F degree-days and base 60 F degree-days. Temperature data used to calculate degree-days", "data. In the returned DataFrame, the index has a timestamp for each month", "you have a DataFrame, you can extract that portion of the DataFrame that", "base 65 deg F values. This script will acquire temperature data from the", "assumes the pickled DataFrame already exists and has the following format: month hdd60", "file is located on a web server, you can read it with the", "Weather Service 4-letter station code. The 'month' column is a first-of-the-month date identifying", "monthly heating degree-day values for 'stn' (a NWS weather site code). 
Degree days", "on a web server, you can read it with the following code: import", "'1H' } sensor_id = '{}_temp'.format(stn) resp = requests.get(BMON_URL.format(sensor_id), params=params).json() if resp['status']=='success': df =", "the 4-letter station code with '_temp' appended, e.g. 'PAMR_temp'. The MIN_COVERAGE constant in", "may be prematurely added to the DataFrame because it satisfies the MIN_COVERAGE check", "new_dfs = [] for stn in df_exist.index.unique(): print('Processing {}: '.format(stn), end='') try: #", "BMON site API BMON_URL = 'https://bms.ahfc.us/api/v1/readings/{}/' def dd_for_site(stn, start_date): \"\"\"Returns a Pandas Dataframe", "it sorted by station and month df_final.reset_index(inplace=True) df_final.sort_values(['station', 'month'], inplace=True) df_final.set_index('station', inplace=True) #", "found in the index of the DataFrame will be updated. The script assumes", "# Save the DataFrame as a compressed pickle and a CSV file. df_final.to_pickle(join(APP_PATH,", "month hdd60 hdd65 station PAED 2018-02-01 1257.648675 1397.648675 PAED 2018-03-01 1028.027773 1183.027773 The", "be safe # get degree days for missing months df_new = dd_for_site(stn, next_mo).query('coverage", "requests.get(BMON_URL.format(sensor_id), params=params).json() if resp['status']=='success': df = pd.DataFrame(resp['data']['readings'], columns=['ts', 'temp']) df.set_index('ts', inplace=True) df.index =", "# get degree days for missing months df_new = dd_for_site(stn, next_mo).query('coverage > @MIN_COVERAGE').copy()", "df.index = pd.to_datetime(df.index) # calculate the percentage of each month that has data", "= dfm.hdd65 * dfc.total_hours # Convert index timestamps to beginning of the month", "the following format: month hdd60 hdd65 station PAED 2018-02-01 1257.648675 1397.648675 PAED 2018-03-01", "import dirname, join, realpath import sys from datetime import datetime, timedelta import pandas", "excecute the following if the DataFrame is available on a local drive: 
import", "F degree-days. Temperature data used to calculate degree-days comes from the AHFC BMON", "first is base 60 degree F values and the second is base 65", "script is typically run from a Cron job that schedules the script to", "described below. This script assumes the pickled DataFrame already exists and has the", "DataFrame, you can extract that portion of the DataFrame that applies to one", "To extract one site with a subset of the months: df_one_site = df.query(\"station", "e.g. 'PAMR_temp'. The MIN_COVERAGE constant in the script controls the minimum amount of", "because it satisfies the MIN_COVERAGE check described below. This script assumes the pickled", "as pd import requests from io import BytesIO b = requests.get('http://ahfc.webfactional.com/data/degree_days.pkl').content d =", "sys from datetime import datetime, timedelta import pandas as pd import requests #", "month. MIN_COVERAGE = 0.7 print('\\nScript Start: {}'.format(datetime.now().ctime())) # path to this directory APP_PATH", "pd df = pd.read_pickle('degree_days.pkl', compression='bz2') If the file is located on a web", "path to this directory APP_PATH = dirname(realpath(__file__)) # URL to the AHFC BMON", "month that must have data in order # to include the month. MIN_COVERAGE", "constant in the script controls the minimum amount of data coverage a month", "x in df.temp] df.drop(['temp'], axis=1, inplace=True) dfm = df.resample('1M').mean() dfm['coverage'] = dfc.coverage dfm['hdd60']", "already present in the DataFrame. All stations found in the index of the", "0.7 print('\\nScript Start: {}'.format(datetime.now().ctime())) # path to this directory APP_PATH = dirname(realpath(__file__)) #", "column is a first-of-the-month date identifying the month whose degree-days are shown. 
'hdd60'", "microsecond=0) params = { 'start_ts': st_dt_1.strftime('%Y-%m-%d'), 'averaging': '1H' } sensor_id = '{}_temp'.format(stn) resp", "pd.read_pickle('degree_days.pkl', compression='bz2') If the file is located on a web server, you can", "can excecute the following if the DataFrame is available on a local drive:", "= dfc.coverage dfm['hdd60'] = dfm.hdd60 * dfc.total_hours dfm['hdd65'] = dfm.hdd65 * dfc.total_hours #", "present for this station last_mo = df_exist.loc[stn].month.max() # get a date in the", "the DataFrame that applies to one site by: df_one_site = df.loc['PAMR'] or df_one_site", "hdd65 station PAED 2018-02-01 1257.648675 1397.648675 PAED 2018-03-01 1028.027773 1183.027773 The index is", "= df.resample('1M').mean() dfm['coverage'] = dfc.coverage dfm['hdd60'] = dfm.hdd60 * dfc.total_hours dfm['hdd65'] = dfm.hdd65", "last_mo = df_exist.loc[stn].month.max() # get a date in the following month next_mo =", "mos = [datetime(d.year, d.month, 1) for d in dfm.index] dfm.index = mos dfm.index.name", "script to run on the first day of the month so that the", "on the first day of the month so that the prior month's degree", "AHFC BMON site in order to calculate the degree-days for the most recent", "requests # Minimum fraction of the hours in a month that must have", "a partial month may be prematurely added to the DataFrame because it satisfies", "os.path import dirname, join, realpath import sys from datetime import datetime, timedelta import", "data present. The column 'coverage' indicates the fraction of the months hours that", "'hdd65' are the heating degree-day values: the first is base 60 degree F", "a first-of-the-month date identifying the month whose degree-days are shown. 
'hdd60' and 'hdd65'", "following code: import pandas as pd import requests from io import BytesIO b", "resp['status']=='success': df = pd.DataFrame(resp['data']['readings'], columns=['ts', 'temp']) df.set_index('ts', inplace=True) df.index = pd.to_datetime(df.index) # calculate", "the path 'data/degree_days.pkl' (compression = 'bz2'). It also saves the DataFrame as a", "a new DataFrame that combines the existing data with the new. df_final =", "is base 65 deg F values. This script will acquire temperature data from", "realpath import sys from datetime import datetime, timedelta import pandas as pd import", "percentage of each month that has data dfc = df.resample('1M').count() dfc['total_hours'] = [i.day", "of month st_dt_1 = start_date.replace(day=1, hour=0, minute=0, second=0, microsecond=0) params = { 'start_ts':", "import requests # Minimum fraction of the hours in a month that must", "dfm['hdd65'] = dfm.hdd65 * dfc.total_hours # Convert index timestamps to beginning of the", "drive: import pandas as pd df = pd.read_pickle('degree_days.pkl', compression='bz2') If the file is", "existing data with the new. df_final = pd.concat([df_exist] + new_dfs) # get it", "= 'station' df_new.drop(columns=['coverage'], inplace=True) # add it to the list of new DataFrames", "df['hdd60'] = [(60.0 - x)/24.0 if x<60.0 else 0.0 for x in df.temp]", "base 60 degree F values and the second is base 65 deg F", "of monthly heating degree-day values for 'stn' (a NWS weather site code). Degree", "Pandas Dataframe of monthly heating degree-day values for 'stn' (a NWS weather site", "information comes from the AHFC BMON site, https://bms.ahfc.us . This script is typically", "timestamps to beginning of the month mos = [datetime(d.year, d.month, 1) for d", "month whose degree-days are shown. 'hdd60' and 'hdd65' are the heating degree-day values:", "new DataFrame that combines the existing data with the new. 
df_final = pd.concat([df_exist]", "web server, you can read it with the following code: import pandas as", "satisfies the MIN_COVERAGE check described below. This script assumes the pickled DataFrame already", "BytesIO b = requests.get('http://ahfc.webfactional.com/data/degree_days.pkl').content d = pd.read_pickle(BytesIO(b), compression='bz2') Once you have a DataFrame,", "to the existing one new_dfs = [] for stn in df_exist.index.unique(): print('Processing {}:", "one site by: df_one_site = df.loc['PAMR'] or df_one_site = df.query(\"station == 'PAMR'\") (slower", "values: the first is base 60 degree F values and the second is", "dirname, join, realpath import sys from datetime import datetime, timedelta import pandas as", "df_final.sort_values(['station', 'month'], inplace=True) df_final.set_index('station', inplace=True) # Save the DataFrame as a compressed pickle", "It also saves the DataFrame as a CSV file at 'data/degree_days.csv'. The new", "are assumed to not deviate from the average of the data present. The", "to the main dataframe to calc degree-days df['hdd60'] = [(60.0 - x)/24.0 if", "the existing data with the new. df_final = pd.concat([df_exist] + new_dfs) # get", "National Weather Service 4-letter station code. The 'month' column is a first-of-the-month date", "the month that 'start_date' (Python date/time object) falls in and continue through the", "(Python date/time object) falls in and continue through the end of available data.", "df.set_index('ts', inplace=True) df.index = pd.to_datetime(df.index) # calculate the percentage of each month that", "date in the following month next_mo = last_mo + timedelta(days=32) # could be", "included. 
Missing data is filled in with the average value for the rest", "AHFC BMON site API BMON_URL = 'https://bms.ahfc.us/api/v1/readings/{}/' def dd_for_site(stn, start_date): \"\"\"Returns a Pandas", "st_dt_1 = start_date.replace(day=1, hour=0, minute=0, second=0, microsecond=0) params = { 'start_ts': st_dt_1.strftime('%Y-%m-%d'), 'averaging':", "DataFrame already exists and has the following format: month hdd60 hdd65 station PAED", "inplace=True) # add it to the list of new DataFrames to eventually add", "that portion of the DataFrame that applies to one site by: df_one_site =", "degree days will be available. Dont' run the script late in the month", "or a partial month may be prematurely added to the DataFrame because it", "subset of the months: df_one_site = df.query(\"station == 'PAMR' and month >= '2018-01-01'\")", "Create a new DataFrame that combines the existing data with the new. df_final", "UTILIZING THE DATA To read this DataFrame back into a Python script, you", "== 'PAMR'\") (slower than above technique) To extract one site with a subset", "station code. The 'month' column is a first-of-the-month date identifying the month whose", "beginning of month st_dt_1 = start_date.replace(day=1, hour=0, minute=0, second=0, microsecond=0) params = {", "except: print('{}: {}'.format(*sys.exc_info()[:2])) # Create a new DataFrame that combines the existing data", "dfc.index] # index is last day of the month dfc['coverage'] = dfc.temp /", "params = { 'start_ts': st_dt_1.strftime('%Y-%m-%d'), 'averaging': '1H' } sensor_id = '{}_temp'.format(stn) resp =", "to the DataFrame because it satisfies the MIN_COVERAGE check described below. This script", "in the script controls the minimum amount of data coverage a month must", "following if the DataFrame is available on a local drive: import pandas as", "of the months hours that actually have data. 
\"\"\" # get beginning of", "/ dfc.total_hours # Now back to the main dataframe to calc degree-days df['hdd60']", "there; add 32 days to be safe # get degree days for missing", "a DataFrame, you can extract that portion of the DataFrame that applies to", "controls the minimum amount of data coverage a month must have before being", "dfc = df.resample('1M').count() dfc['total_hours'] = [i.day * 24 for i in dfc.index] #", "# Minimum fraction of the hours in a month that must have data", "DataFrames to eventually add to the # degree-day DataFrame new_dfs.append(df_new) print('{} new months'.format(len(df_new)))", "sorted by station and month df_final.reset_index(inplace=True) df_final.sort_values(['station', 'month'], inplace=True) df_final.set_index('station', inplace=True) # Save", "= dfm.hdd60 * dfc.total_hours dfm['hdd65'] = dfm.hdd65 * dfc.total_hours # Convert index timestamps", "= [(65.0 - x)/24.0 if x<65.0 else 0.0 for x in df.temp] df.drop(['temp'],", "weather site code). Degree days start in the month that 'start_date' (Python date/time", "a pickled Pandas DataFrame with the path 'data/degree_days.pkl' (compression = 'bz2'). It also", "[i.day * 24 for i in dfc.index] # index is last day of", "APP_PATH = dirname(realpath(__file__)) # URL to the AHFC BMON site API BMON_URL =", "the rest of the hours that do have data. ----------------------------------- NOTES ON UTILIZING", "new months'.format(len(df_new))) else: print() except: print('{}: {}'.format(*sys.exc_info()[:2])) # Create a new DataFrame that", "of the month so that the prior month's degree days will be available.", "is the National Weather Service 4-letter station code. 
The 'month' column is a", "if resp['status']=='success': df = pd.DataFrame(resp['data']['readings'], columns=['ts', 'temp']) df.set_index('ts', inplace=True) df.index = pd.to_datetime(df.index) #", "print('{} new months'.format(len(df_new))) else: print() except: print('{}: {}'.format(*sys.exc_info()[:2])) # Create a new DataFrame", "get last month present for this station last_mo = df_exist.loc[stn].month.max() # get a", "change in there; add 32 days to be safe # get degree days", "Missing hours are assumed to not deviate from the average of the data", "the # degree-day DataFrame new_dfs.append(df_new) print('{} new months'.format(len(df_new))) else: print() except: print('{}: {}'.format(*sys.exc_info()[:2]))", "being the first day of the month. The columns of the DataFrame are", "check described below. This script assumes the pickled DataFrame already exists and has", "is the 4-letter station code with '_temp' appended, e.g. 'PAMR_temp'. The MIN_COVERAGE constant", "pd import requests from io import BytesIO b = requests.get('http://ahfc.webfactional.com/data/degree_days.pkl').content d = pd.read_pickle(BytesIO(b),", "degree-day values for 'stn' (a NWS weather site code). Degree days start in", "have data. ----------------------------------- NOTES ON UTILIZING THE DATA To read this DataFrame back", "df_exist.index.unique(): print('Processing {}: '.format(stn), end='') try: # get last month present for this", "DataFrame new_dfs.append(df_new) print('{} new months'.format(len(df_new))) else: print() except: print('{}: {}'.format(*sys.exc_info()[:2])) # Create a", "weather station's temperature data is the 4-letter station code with '_temp' appended, e.g.", "a CSV file at 'data/degree_days.csv'. 
The new degree-day information comes from the AHFC", "= last_mo + timedelta(days=32) # could be a DST change in there; add", "following format: month hdd60 hdd65 station PAED 2018-02-01 1257.648675 1397.648675 PAED 2018-03-01 1028.027773", "hours are assumed to not deviate from the average of the data present.", "existing one df_new.reset_index(inplace=True) df_new.index = [stn] * len(df_new) df_new.index.name = 'station' df_new.drop(columns=['coverage'], inplace=True)", "df_final.reset_index(inplace=True) df_final.sort_values(['station', 'month'], inplace=True) df_final.set_index('station', inplace=True) # Save the DataFrame as a compressed", "= { 'start_ts': st_dt_1.strftime('%Y-%m-%d'), 'averaging': '1H' } sensor_id = '{}_temp'.format(stn) resp = requests.get(BMON_URL.format(sensor_id),", "from the AHFC BMON site in order to calculate the degree-days for the", "DataFrame as a compressed pickle and a CSV file. df_final.to_pickle(join(APP_PATH, 'data/degree_days.pkl'), compression='bz2', protocol=4)", "= pd.concat([df_exist] + new_dfs) # get it sorted by station and month df_final.reset_index(inplace=True)", "the following month next_mo = last_mo + timedelta(days=32) # could be a DST", "This script assumes the pickled DataFrame already exists and has the following format:", "pd.to_datetime(df.index) # calculate the percentage of each month that has data dfc =", "sensor_id = '{}_temp'.format(stn) resp = requests.get(BMON_URL.format(sensor_id), params=params).json() if resp['status']=='success': df = pd.DataFrame(resp['data']['readings'], columns=['ts',", "new. df_final = pd.concat([df_exist] + new_dfs) # get it sorted by station and", "df_new.drop(columns=['coverage'], inplace=True) # add it to the list of new DataFrames to eventually", "must have data in order # to include the month. 
MIN_COVERAGE = 0.7", "for a weather station's temperature data is the 4-letter station code with '_temp'", "inplace=True) df_final.set_index('station', inplace=True) # Save the DataFrame as a compressed pickle and a", "# to include the month. MIN_COVERAGE = 0.7 print('\\nScript Start: {}'.format(datetime.now().ctime())) # path", "available data. In the returned DataFrame, the index has a timestamp for each", "the DataFrame will be updated. The script assumes that the BMON sensor ID", "mos dfm.index.name = 'month' else: raise ValueError(str(resp['data'])) return dfm if __name__ == '__main__':", "DataFrame will be updated. The script assumes that the BMON sensor ID for", "= df.resample('1M').count() dfc['total_hours'] = [i.day * 24 for i in dfc.index] # index", "whose degree-days are shown. 'hdd60' and 'hdd65' are the heating degree-day values: the", "for stn in df_exist.index.unique(): print('Processing {}: '.format(stn), end='') try: # get last month", "with the new. df_final = pd.concat([df_exist] + new_dfs) # get it sorted by", "stations found in the index of the DataFrame will be updated. The script", "station last_mo = df_exist.loc[stn].month.max() # get a date in the following month next_mo", "dfc.total_hours # Now back to the main dataframe to calc degree-days df['hdd60'] =", "dfm.hdd65 * dfc.total_hours # Convert index timestamps to beginning of the month mos", "already exists and has the following format: month hdd60 hdd65 station PAED 2018-02-01", "'station' df_new.drop(columns=['coverage'], inplace=True) # add it to the list of new DataFrames to", "Python script, you can excecute the following if the DataFrame is available on", "df_final = pd.concat([df_exist] + new_dfs) # get it sorted by station and month", "in the DataFrame. 
All stations found in the index of the DataFrame will", "the AHFC BMON site API BMON_URL = 'https://bms.ahfc.us/api/v1/readings/{}/' def dd_for_site(stn, start_date): \"\"\"Returns a", "for d in dfm.index] dfm.index = mos dfm.index.name = 'month' else: raise ValueError(str(resp['data']))", "ID for a weather station's temperature data is the 4-letter station code with", "the AHFC BMON site, https://bms.ahfc.us . This script is typically run from a", "that actually have data. \"\"\" # get beginning of month st_dt_1 = start_date.replace(day=1,", "BMON site. Missing hours are assumed to not deviate from the average of", "get degree days for missing months df_new = dd_for_site(stn, next_mo).query('coverage > @MIN_COVERAGE').copy() if", "d.month, 1) for d in dfm.index] dfm.index = mos dfm.index.name = 'month' else:", "and base 60 F degree-days. Temperature data used to calculate degree-days comes from", "object) falls in and continue through the end of available data. In the", "a Cron job that schedules the script to run on the first day", "'month'], inplace=True) df_final.set_index('station', inplace=True) # Save the DataFrame as a compressed pickle and", "BMON site, https://bms.ahfc.us . This script is typically run from a Cron job", "sensor ID for a weather station's temperature data is the 4-letter station code", "= pd.to_datetime(df.index) # calculate the percentage of each month that has data dfc", "'__main__': df_exist = pd.read_pickle(join(APP_PATH, 'data/degree_days.pkl'), compression='bz2') # list of new DataFrames to add", "end='') try: # get last month present for this station last_mo = df_exist.loc[stn].month.max()", "heating degree-day values for 'stn' (a NWS weather site code). 
Degree days start", "ValueError(str(resp['data'])) return dfm if __name__ == '__main__': df_exist = pd.read_pickle(join(APP_PATH, 'data/degree_days.pkl'), compression='bz2') #", "filled in with the average value for the rest of the hours that", "----------------------------------- NOTES ON UTILIZING THE DATA To read this DataFrame back into a", "If the file is located on a web server, you can read it", "start_date): \"\"\"Returns a Pandas Dataframe of monthly heating degree-day values for 'stn' (a", "dfc.coverage dfm['hdd60'] = dfm.hdd60 * dfc.total_hours dfm['hdd65'] = dfm.hdd65 * dfc.total_hours # Convert", "to calculate the degree-days for the most recent months not already present in", "used to calculate degree-days comes from the AHFC BMON site. Missing hours are", "comes from the AHFC BMON site, https://bms.ahfc.us . This script is typically run", "new_dfs) # get it sorted by station and month df_final.reset_index(inplace=True) df_final.sort_values(['station', 'month'], inplace=True)", "month dfc['coverage'] = dfc.temp / dfc.total_hours # Now back to the main dataframe", "2018-03-01 1028.027773 1183.027773 The index is the National Weather Service 4-letter station code.", "dirname(realpath(__file__)) # URL to the AHFC BMON site API BMON_URL = 'https://bms.ahfc.us/api/v1/readings/{}/' def", "of available data. In the returned DataFrame, the index has a timestamp for", "months df_new = dd_for_site(stn, next_mo).query('coverage > @MIN_COVERAGE').copy() if len(df_new): # put this DataFrame", "'{}_temp'.format(stn) resp = requests.get(BMON_URL.format(sensor_id), params=params).json() if resp['status']=='success': df = pd.DataFrame(resp['data']['readings'], columns=['ts', 'temp']) df.set_index('ts',", "'data/degree_days.csv'. The new degree-day information comes from the AHFC BMON site, https://bms.ahfc.us .", "'stn' (a NWS weather site code). Degree days start in the month that", "1183.027773 The index is the National Weather Service 4-letter station code. 
The 'month'", "that adds monthly heating degree day values to a pickled Pandas DataFrame with", "last month present for this station last_mo = df_exist.loc[stn].month.max() # get a date", "else: raise ValueError(str(resp['data'])) return dfm if __name__ == '__main__': df_exist = pd.read_pickle(join(APP_PATH, 'data/degree_days.pkl'),", "next_mo = last_mo + timedelta(days=32) # could be a DST change in there;", "form that can be concatenated to the existing one df_new.reset_index(inplace=True) df_new.index = [stn]", "in df_exist.index.unique(): print('Processing {}: '.format(stn), end='') try: # get last month present for", "to the AHFC BMON site API BMON_URL = 'https://bms.ahfc.us/api/v1/readings/{}/' def dd_for_site(stn, start_date): \"\"\"Returns", "pd.read_pickle(BytesIO(b), compression='bz2') Once you have a DataFrame, you can extract that portion of", "df_exist = pd.read_pickle(join(APP_PATH, 'data/degree_days.pkl'), compression='bz2') # list of new DataFrames to add to", "timestamp for each month returned, that being the first day of the month.", "https://bms.ahfc.us . This script is typically run from a Cron job that schedules", "'_temp' appended, e.g. 'PAMR_temp'. The MIN_COVERAGE constant in the script controls the minimum", "have data. \"\"\" # get beginning of month st_dt_1 = start_date.replace(day=1, hour=0, minute=0,", "existing one new_dfs = [] for stn in df_exist.index.unique(): print('Processing {}: '.format(stn), end='')", "beginning of the month mos = [datetime(d.year, d.month, 1) for d in dfm.index]", "def dd_for_site(stn, start_date): \"\"\"Returns a Pandas Dataframe of monthly heating degree-day values for", "of the DataFrame are \"hdd65\" and \"hdd60\" to designate base 65 F degree-days", "code. 
The 'month' column is a first-of-the-month date identifying the month whose degree-days", "the file is located on a web server, you can read it with", "'hdd60' and 'hdd65' are the heating degree-day values: the first is base 60", "BMON_URL = 'https://bms.ahfc.us/api/v1/readings/{}/' def dd_for_site(stn, start_date): \"\"\"Returns a Pandas Dataframe of monthly heating", "columns=['ts', 'temp']) df.set_index('ts', inplace=True) df.index = pd.to_datetime(df.index) # calculate the percentage of each", "data used to calculate degree-days comes from the AHFC BMON site. Missing hours", "in dfc.index] # index is last day of the month dfc['coverage'] = dfc.temp", "to add to the existing one new_dfs = [] for stn in df_exist.index.unique():", "32 days to be safe # get degree days for missing months df_new", "{}'.format(*sys.exc_info()[:2])) # Create a new DataFrame that combines the existing data with the", "that applies to one site by: df_one_site = df.loc['PAMR'] or df_one_site = df.query(\"station", "of the DataFrame will be updated. The script assumes that the BMON sensor", "month returned, that being the first day of the month. The columns of", "# URL to the AHFC BMON site API BMON_URL = 'https://bms.ahfc.us/api/v1/readings/{}/' def dd_for_site(stn,", "== '__main__': df_exist = pd.read_pickle(join(APP_PATH, 'data/degree_days.pkl'), compression='bz2') # list of new DataFrames to", "the National Weather Service 4-letter station code. The 'month' column is a first-of-the-month", "df_one_site = df.loc['PAMR'] or df_one_site = df.query(\"station == 'PAMR'\") (slower than above technique)", "= df.query(\"station == 'PAMR'\") (slower than above technique) To extract one site with", "data is the 4-letter station code with '_temp' appended, e.g. 'PAMR_temp'. The MIN_COVERAGE", "the new. 
df_final = pd.concat([df_exist] + new_dfs) # get it sorted by station", "In the returned DataFrame, the index has a timestamp for each month returned,", "# path to this directory APP_PATH = dirname(realpath(__file__)) # URL to the AHFC", "indicates the fraction of the months hours that actually have data. \"\"\" #", "a month that must have data in order # to include the month.", "The columns of the DataFrame are \"hdd65\" and \"hdd60\" to designate base 65", "of new DataFrames to add to the existing one new_dfs = [] for", "the index has a timestamp for each month returned, that being the first", "* len(df_new) df_new.index.name = 'station' df_new.drop(columns=['coverage'], inplace=True) # add it to the list", "= dd_for_site(stn, next_mo).query('coverage > @MIN_COVERAGE').copy() if len(df_new): # put this DataFrame in a", "that 'start_date' (Python date/time object) falls in and continue through the end of", "if x<60.0 else 0.0 for x in df.temp] df['hdd65'] = [(65.0 - x)/24.0", "heating degree day values to a pickled Pandas DataFrame with the path 'data/degree_days.pkl'", "day of the month. The columns of the DataFrame are \"hdd65\" and \"hdd60\"", "deviate from the average of the data present. The column 'coverage' indicates the", "degree-days df['hdd60'] = [(60.0 - x)/24.0 if x<60.0 else 0.0 for x in", "x<60.0 else 0.0 for x in df.temp] df['hdd65'] = [(65.0 - x)/24.0 if", "updated. The script assumes that the BMON sensor ID for a weather station's", "MIN_COVERAGE constant in the script controls the minimum amount of data coverage a", "'2018-01-01'\") \"\"\" from os.path import dirname, join, realpath import sys from datetime import", "DataFrame. 
All stations found in the index of the DataFrame will be updated.", "are \"hdd65\" and \"hdd60\" to designate base 65 F degree-days and base 60", "the existing one new_dfs = [] for stn in df_exist.index.unique(): print('Processing {}: '.format(stn),", "the first day of the month so that the prior month's degree days", "assumes that the BMON sensor ID for a weather station's temperature data is", "the average value for the rest of the hours that do have data.", "the existing one df_new.reset_index(inplace=True) df_new.index = [stn] * len(df_new) df_new.index.name = 'station' df_new.drop(columns=['coverage'],", "present. The column 'coverage' indicates the fraction of the months hours that actually", "The index is the National Weather Service 4-letter station code. The 'month' column", "{}: '.format(stn), end='') try: # get last month present for this station last_mo", "and continue through the end of available data. In the returned DataFrame, the", "not deviate from the average of the data present. The column 'coverage' indicates", "# add it to the list of new DataFrames to eventually add to", "DATA To read this DataFrame back into a Python script, you can excecute", "site code). Degree days start in the month that 'start_date' (Python date/time object)", "not already present in the DataFrame. All stations found in the index of", "main dataframe to calc degree-days df['hdd60'] = [(60.0 - x)/24.0 if x<60.0 else", "second=0, microsecond=0) params = { 'start_ts': st_dt_1.strftime('%Y-%m-%d'), 'averaging': '1H' } sensor_id = '{}_temp'.format(stn)", "degree-days for the most recent months not already present in the DataFrame. All", "you can excecute the following if the DataFrame is available on a local", "a Python script, you can excecute the following if the DataFrame is available", "data coverage a month must have before being included. Missing data is filled", "'bz2'). It also saves the DataFrame as a CSV file at 'data/degree_days.csv'. 
The", "to beginning of the month mos = [datetime(d.year, d.month, 1) for d in", "with a subset of the months: df_one_site = df.query(\"station == 'PAMR' and month", "= requests.get('http://ahfc.webfactional.com/data/degree_days.pkl').content d = pd.read_pickle(BytesIO(b), compression='bz2') Once you have a DataFrame, you can", "of the hours in a month that must have data in order #", "len(df_new) df_new.index.name = 'station' df_new.drop(columns=['coverage'], inplace=True) # add it to the list of", "code). Degree days start in the month that 'start_date' (Python date/time object) falls", "in the month that 'start_date' (Python date/time object) falls in and continue through", "of new DataFrames to eventually add to the # degree-day DataFrame new_dfs.append(df_new) print('{}", "= 0.7 print('\\nScript Start: {}'.format(datetime.now().ctime())) # path to this directory APP_PATH = dirname(realpath(__file__))", "# Now back to the main dataframe to calc degree-days df['hdd60'] = [(60.0", "'coverage' indicates the fraction of the months hours that actually have data. \"\"\"", "day of the month so that the prior month's degree days will be", "data from the AHFC BMON site in order to calculate the degree-days for", "average value for the rest of the hours that do have data. -----------------------------------", "Now back to the main dataframe to calc degree-days df['hdd60'] = [(60.0 -", "into a Python script, you can excecute the following if the DataFrame is", "\"\"\"Script that adds monthly heating degree day values to a pickled Pandas DataFrame", "that do have data. ----------------------------------- NOTES ON UTILIZING THE DATA To read this", "x in df.temp] df['hdd65'] = [(65.0 - x)/24.0 if x<65.0 else 0.0 for", "falls in and continue through the end of available data. 
In the returned", "in df.temp] df.drop(['temp'], axis=1, inplace=True) dfm = df.resample('1M').mean() dfm['coverage'] = dfc.coverage dfm['hdd60'] =", "df_final.set_index('station', inplace=True) # Save the DataFrame as a compressed pickle and a CSV", "a Pandas Dataframe of monthly heating degree-day values for 'stn' (a NWS weather", "month must have before being included. Missing data is filled in with the", "= [] for stn in df_exist.index.unique(): print('Processing {}: '.format(stn), end='') try: # get", "# get it sorted by station and month df_final.reset_index(inplace=True) df_final.sort_values(['station', 'month'], inplace=True) df_final.set_index('station',", "by station and month df_final.reset_index(inplace=True) df_final.sort_values(['station', 'month'], inplace=True) df_final.set_index('station', inplace=True) # Save the", "df.loc['PAMR'] or df_one_site = df.query(\"station == 'PAMR'\") (slower than above technique) To extract", "the DataFrame are \"hdd65\" and \"hdd60\" to designate base 65 F degree-days and", "the month mos = [datetime(d.year, d.month, 1) for d in dfm.index] dfm.index =", "else 0.0 for x in df.temp] df.drop(['temp'], axis=1, inplace=True) dfm = df.resample('1M').mean() dfm['coverage']", "== 'PAMR' and month >= '2018-01-01'\") \"\"\" from os.path import dirname, join, realpath", "0.0 for x in df.temp] df['hdd65'] = [(65.0 - x)/24.0 if x<65.0 else", "hours that do have data. ----------------------------------- NOTES ON UTILIZING THE DATA To read", "dfc.temp / dfc.total_hours # Now back to the main dataframe to calc degree-days", "days will be available. Dont' run the script late in the month or", "the AHFC BMON site. 
Missing hours are assumed to not deviate from the", "THE DATA To read this DataFrame back into a Python script, you can", "= requests.get(BMON_URL.format(sensor_id), params=params).json() if resp['status']=='success': df = pd.DataFrame(resp['data']['readings'], columns=['ts', 'temp']) df.set_index('ts', inplace=True) df.index", "Cron job that schedules the script to run on the first day of", "\"hdd60\" to designate base 65 F degree-days and base 60 F degree-days. Temperature", "a timestamp for each month returned, that being the first day of the", "that can be concatenated to the existing one df_new.reset_index(inplace=True) df_new.index = [stn] *", "to include the month. MIN_COVERAGE = 0.7 print('\\nScript Start: {}'.format(datetime.now().ctime())) # path to", "of the data present. The column 'coverage' indicates the fraction of the months", "a DST change in there; add 32 days to be safe # get", "the heating degree-day values: the first is base 60 degree F values and", "in dfm.index] dfm.index = mos dfm.index.name = 'month' else: raise ValueError(str(resp['data'])) return dfm", "is last day of the month dfc['coverage'] = dfc.temp / dfc.total_hours # Now", "degree-day DataFrame new_dfs.append(df_new) print('{} new months'.format(len(df_new))) else: print() except: print('{}: {}'.format(*sys.exc_info()[:2])) # Create", "in order to calculate the degree-days for the most recent months not already", "the month whose degree-days are shown. 'hdd60' and 'hdd65' are the heating degree-day", "that combines the existing data with the new. df_final = pd.concat([df_exist] + new_dfs)", "typically run from a Cron job that schedules the script to run on", "from the AHFC BMON site. Missing hours are assumed to not deviate from", "the script controls the minimum amount of data coverage a month must have", "before being included. 
Missing data is filled in with the average value for", "[(60.0 - x)/24.0 if x<60.0 else 0.0 for x in df.temp] df['hdd65'] =", "a weather station's temperature data is the 4-letter station code with '_temp' appended,", "The column 'coverage' indicates the fraction of the months hours that actually have", "> @MIN_COVERAGE').copy() if len(df_new): # put this DataFrame in a form that can", "= [i.day * 24 for i in dfc.index] # index is last day", "station's temperature data is the 4-letter station code with '_temp' appended, e.g. 'PAMR_temp'.", "combines the existing data with the new. df_final = pd.concat([df_exist] + new_dfs) #", "will be updated. The script assumes that the BMON sensor ID for a", "day values to a pickled Pandas DataFrame with the path 'data/degree_days.pkl' (compression =", "site in order to calculate the degree-days for the most recent months not", "from io import BytesIO b = requests.get('http://ahfc.webfactional.com/data/degree_days.pkl').content d = pd.read_pickle(BytesIO(b), compression='bz2') Once you", "and \"hdd60\" to designate base 65 F degree-days and base 60 F degree-days.", "calc degree-days df['hdd60'] = [(60.0 - x)/24.0 if x<60.0 else 0.0 for x", "could be a DST change in there; add 32 days to be safe", "in df.temp] df['hdd65'] = [(65.0 - x)/24.0 if x<65.0 else 0.0 for x", "Start: {}'.format(datetime.now().ctime())) # path to this directory APP_PATH = dirname(realpath(__file__)) # URL to", "being included. Missing data is filled in with the average value for the", "a web server, you can read it with the following code: import pandas", "and month df_final.reset_index(inplace=True) df_final.sort_values(['station', 'month'], inplace=True) df_final.set_index('station', inplace=True) # Save the DataFrame as", "month. The columns of the DataFrame are \"hdd65\" and \"hdd60\" to designate base", "from the average of the data present. 
The column 'coverage' indicates the fraction", "of each month that has data dfc = df.resample('1M').count() dfc['total_hours'] = [i.day *", "add 32 days to be safe # get degree days for missing months", "server, you can read it with the following code: import pandas as pd", "dfm.index = mos dfm.index.name = 'month' else: raise ValueError(str(resp['data'])) return dfm if __name__", "Once you have a DataFrame, you can extract that portion of the DataFrame", "schedules the script to run on the first day of the month so", "df_new.index = [stn] * len(df_new) df_new.index.name = 'station' df_new.drop(columns=['coverage'], inplace=True) # add it", "dfm.index] dfm.index = mos dfm.index.name = 'month' else: raise ValueError(str(resp['data'])) return dfm if", "one df_new.reset_index(inplace=True) df_new.index = [stn] * len(df_new) df_new.index.name = 'station' df_new.drop(columns=['coverage'], inplace=True) #", "dfm.index.name = 'month' else: raise ValueError(str(resp['data'])) return dfm if __name__ == '__main__': df_exist", "inplace=True) # Save the DataFrame as a compressed pickle and a CSV file.", "second is base 65 deg F values. This script will acquire temperature data", "for each month returned, that being the first day of the month. The", "new degree-day information comes from the AHFC BMON site, https://bms.ahfc.us . This script", "continue through the end of available data. In the returned DataFrame, the index", "this directory APP_PATH = dirname(realpath(__file__)) # URL to the AHFC BMON site API", "the DataFrame is available on a local drive: import pandas as pd df", "next_mo).query('coverage > @MIN_COVERAGE').copy() if len(df_new): # put this DataFrame in a form that", "temperature data is the 4-letter station code with '_temp' appended, e.g. 'PAMR_temp'. The", "1028.027773 1183.027773 The index is the National Weather Service 4-letter station code. The", "degree-days are shown. 
'hdd60' and 'hdd65' are the heating degree-day values: the first", "1257.648675 1397.648675 PAED 2018-03-01 1028.027773 1183.027773 The index is the National Weather Service", "df = pd.read_pickle('degree_days.pkl', compression='bz2') If the file is located on a web server,", "Dataframe of monthly heating degree-day values for 'stn' (a NWS weather site code).", "the pickled DataFrame already exists and has the following format: month hdd60 hdd65", "date identifying the month whose degree-days are shown. 'hdd60' and 'hdd65' are the", "is a first-of-the-month date identifying the month whose degree-days are shown. 'hdd60' and", "recent months not already present in the DataFrame. All stations found in the", "amount of data coverage a month must have before being included. Missing data", "column 'coverage' indicates the fraction of the months hours that actually have data.", "dfm if __name__ == '__main__': df_exist = pd.read_pickle(join(APP_PATH, 'data/degree_days.pkl'), compression='bz2') # list of", "start in the month that 'start_date' (Python date/time object) falls in and continue", "a form that can be concatenated to the existing one df_new.reset_index(inplace=True) df_new.index =", "with '_temp' appended, e.g. 'PAMR_temp'. The MIN_COVERAGE constant in the script controls the", "= mos dfm.index.name = 'month' else: raise ValueError(str(resp['data'])) return dfm if __name__ ==", "has a timestamp for each month returned, that being the first day of", "the fraction of the months hours that actually have data. \"\"\" # get", "available. 
Dont' run the script late in the month or a partial month", "dfc.total_hours dfm['hdd65'] = dfm.hdd65 * dfc.total_hours # Convert index timestamps to beginning of", "back to the main dataframe to calc degree-days df['hdd60'] = [(60.0 - x)/24.0", "if the DataFrame is available on a local drive: import pandas as pd", "above technique) To extract one site with a subset of the months: df_one_site", "that the prior month's degree days will be available. Dont' run the script", "a month must have before being included. Missing data is filled in with", "return dfm if __name__ == '__main__': df_exist = pd.read_pickle(join(APP_PATH, 'data/degree_days.pkl'), compression='bz2') # list", "DataFrame in a form that can be concatenated to the existing one df_new.reset_index(inplace=True)", "in and continue through the end of available data. In the returned DataFrame,", "read it with the following code: import pandas as pd import requests from", "the month or a partial month may be prematurely added to the DataFrame", "'data/degree_days.pkl'), compression='bz2') # list of new DataFrames to add to the existing one", "'month' column is a first-of-the-month date identifying the month whose degree-days are shown.", "compression='bz2') # list of new DataFrames to add to the existing one new_dfs", "of the month mos = [datetime(d.year, d.month, 1) for d in dfm.index] dfm.index", "run the script late in the month or a partial month may be", "each month that has data dfc = df.resample('1M').count() dfc['total_hours'] = [i.day * 24", "pandas as pd df = pd.read_pickle('degree_days.pkl', compression='bz2') If the file is located on", "coverage a month must have before being included. 
Missing data is filled in", "the degree-days for the most recent months not already present in the DataFrame.", "get it sorted by station and month df_final.reset_index(inplace=True) df_final.sort_values(['station', 'month'], inplace=True) df_final.set_index('station', inplace=True)", "if x<65.0 else 0.0 for x in df.temp] df.drop(['temp'], axis=1, inplace=True) dfm =", "dfc['total_hours'] = [i.day * 24 for i in dfc.index] # index is last", "'start_date' (Python date/time object) falls in and continue through the end of available", "be available. Dont' run the script late in the month or a partial", "(slower than above technique) To extract one site with a subset of the", "The new degree-day information comes from the AHFC BMON site, https://bms.ahfc.us . This", "index of the DataFrame will be updated. The script assumes that the BMON", "# degree-day DataFrame new_dfs.append(df_new) print('{} new months'.format(len(df_new))) else: print() except: print('{}: {}'.format(*sys.exc_info()[:2])) #", "from datetime import datetime, timedelta import pandas as pd import requests # Minimum", "This script will acquire temperature data from the AHFC BMON site in order", "site by: df_one_site = df.loc['PAMR'] or df_one_site = df.query(\"station == 'PAMR'\") (slower than", "to eventually add to the # degree-day DataFrame new_dfs.append(df_new) print('{} new months'.format(len(df_new))) else:", "the script late in the month or a partial month may be prematurely", "__name__ == '__main__': df_exist = pd.read_pickle(join(APP_PATH, 'data/degree_days.pkl'), compression='bz2') # list of new DataFrames", "will be available. Dont' run the script late in the month or a", "pd.concat([df_exist] + new_dfs) # get it sorted by station and month df_final.reset_index(inplace=True) df_final.sort_values(['station',", "month so that the prior month's degree days will be available. Dont' run", "Service 4-letter station code. 
The 'month' column is a first-of-the-month date identifying the", "appended, e.g. 'PAMR_temp'. The MIN_COVERAGE constant in the script controls the minimum amount", "month present for this station last_mo = df_exist.loc[stn].month.max() # get a date in", "df_exist.loc[stn].month.max() # get a date in the following month next_mo = last_mo +", "# Create a new DataFrame that combines the existing data with the new.", "put this DataFrame in a form that can be concatenated to the existing", "are shown. 'hdd60' and 'hdd65' are the heating degree-day values: the first is", "script will acquire temperature data from the AHFC BMON site in order to", "df.temp] df['hdd65'] = [(65.0 - x)/24.0 if x<65.0 else 0.0 for x in", "format: month hdd60 hdd65 station PAED 2018-02-01 1257.648675 1397.648675 PAED 2018-03-01 1028.027773 1183.027773", "values to a pickled Pandas DataFrame with the path 'data/degree_days.pkl' (compression = 'bz2').", "below. This script assumes the pickled DataFrame already exists and has the following", "values and the second is base 65 deg F values. This script will", "to this directory APP_PATH = dirname(realpath(__file__)) # URL to the AHFC BMON site", "65 deg F values. This script will acquire temperature data from the AHFC", "the hours in a month that must have data in order # to", "dfm.hdd60 * dfc.total_hours dfm['hdd65'] = dfm.hdd65 * dfc.total_hours # Convert index timestamps to", "print('Processing {}: '.format(stn), end='') try: # get last month present for this station", "d = pd.read_pickle(BytesIO(b), compression='bz2') Once you have a DataFrame, you can extract that", "the percentage of each month that has data dfc = df.resample('1M').count() dfc['total_hours'] =", "add it to the list of new DataFrames to eventually add to the", "df.resample('1M').count() dfc['total_hours'] = [i.day * 24 for i in dfc.index] # index is", "located on a web server, you can read it with the following code:", "at 'data/degree_days.csv'. 
The new degree-day information comes from the AHFC BMON site, https://bms.ahfc.us", "the months hours that actually have data. \"\"\" # get beginning of month", "# get beginning of month st_dt_1 = start_date.replace(day=1, hour=0, minute=0, second=0, microsecond=0) params", "days to be safe # get degree days for missing months df_new =", "to one site by: df_one_site = df.loc['PAMR'] or df_one_site = df.query(\"station == 'PAMR'\")", "be updated. The script assumes that the BMON sensor ID for a weather", "df.query(\"station == 'PAMR'\") (slower than above technique) To extract one site with a", "to designate base 65 F degree-days and base 60 F degree-days. Temperature data", "you can read it with the following code: import pandas as pd import", "df_new.index.name = 'station' df_new.drop(columns=['coverage'], inplace=True) # add it to the list of new", "and has the following format: month hdd60 hdd65 station PAED 2018-02-01 1257.648675 1397.648675", "= 'month' else: raise ValueError(str(resp['data'])) return dfm if __name__ == '__main__': df_exist =", "portion of the DataFrame that applies to one site by: df_one_site = df.loc['PAMR']", "the DataFrame. 
All stations found in the index of the DataFrame will be", "columns of the DataFrame are \"hdd65\" and \"hdd60\" to designate base 65 F", "as pd import requests # Minimum fraction of the hours in a month", "Convert index timestamps to beginning of the month mos = [datetime(d.year, d.month, 1)", "1397.648675 PAED 2018-03-01 1028.027773 1183.027773 The index is the National Weather Service 4-letter", "To read this DataFrame back into a Python script, you can excecute the", "has data dfc = df.resample('1M').count() dfc['total_hours'] = [i.day * 24 for i in", "b = requests.get('http://ahfc.webfactional.com/data/degree_days.pkl').content d = pd.read_pickle(BytesIO(b), compression='bz2') Once you have a DataFrame, you", "or df_one_site = df.query(\"station == 'PAMR'\") (slower than above technique) To extract one", "than above technique) To extract one site with a subset of the months:", "with the following code: import pandas as pd import requests from io import", "datetime, timedelta import pandas as pd import requests # Minimum fraction of the", "extract one site with a subset of the months: df_one_site = df.query(\"station ==", "have data in order # to include the month. MIN_COVERAGE = 0.7 print('\\nScript", "a local drive: import pandas as pd df = pd.read_pickle('degree_days.pkl', compression='bz2') If the" ]
[ "for move in moves: child = deepcopy(board) self.move_black(child, *move) score = -self.negamax(child, depth", "self.BEST_MOVE = moves[0] for move in moves: child = deepcopy(board) self.move_black(child, *move) v", "if depth == 0 or self.game_over(board): return color * self.evaluate(board) v = float('-inf')", "moves = self.generate_black_moves(board) self.BEST_MOVE = moves[0] for move in moves: child = deepcopy(board)", "def negamax(self, board, depth, color): if depth == 0 or self.game_over(board): return color", "-beta, -alpha, -color) if score >= alpha: alpha = score self.BEST_MOVE = move", "cPickle from Board import Actions class Negamax_AB(Actions): BEST_MOVE = None def __init__(self): Actions.__init__(self)", "Negamax_AB(Actions): BEST_MOVE = None def __init__(self): Actions.__init__(self) def __deepcopy__(self, memodict={}): # faster than", "None def __init__(self): Actions.__init__(self) def __deepcopy__(self, memodict={}): # faster than built-in g =", "def negamax(self, board, depth, alpha, beta, color): if depth <= 0 or self.game_over(board):", "-alpha, -color) if score >= alpha: alpha = score self.BEST_MOVE = move if", "return alpha def evaluate(self, board): # for AI # sum(my pieces) - sum(oponent", "self.evaluate(board) v = float('-inf') moves = self.generate_black_moves(board) self.BEST_MOVE = moves[0] for move in", "color): if depth <= 0 or self.game_over(board): return color * self.evaluate(board) moves =", "move return v def evaluate(self, board): # for AI # sum(my pieces) -", "color): if depth == 0 or self.game_over(board): return color * self.evaluate(board) v =", "self.BEST_MOVE = move if alpha >= beta: break return alpha def evaluate(self, board):", "deepcopy(board) self.move_black(child, *move) v = max(v, -self.negamax(child, depth - 1, -color)) self.BEST_MOVE =", "moves: child = deepcopy(board) self.move_black(child, *move) v = max(v, -self.negamax(child, depth - 1,", "won\" return True elif not black_pieces: print \"White has won\" return 
True return", "print \"White has won\" return True return False def negamax(self, board, depth, alpha,", "= move return v def evaluate(self, board): # for AI # sum(my pieces)", "cPickle.loads(cPickle.dumps(self, -1)) return g def game_over(self, board): white_pieces = self.get_white_pieces(board) black_pieces = self.get_black_pieces(board)", "move in moves: child = deepcopy(board) self.move_black(child, *move) score = -self.negamax(child, depth -", "-self.negamax(child, depth - 1, -beta, -alpha, -color) if score >= alpha: alpha =", "* self.evaluate(board) v = float('-inf') moves = self.generate_black_moves(board) self.BEST_MOVE = moves[0] for move", "or self.game_over(board): return color * self.evaluate(board) v = float('-inf') moves = self.generate_black_moves(board) self.BEST_MOVE", "import cPickle from Board import Actions class Negamax_AB(Actions): BEST_MOVE = None def __init__(self):", "copy import deepcopy import cPickle from Board import Actions class Negamax_AB(Actions): BEST_MOVE =", "import Actions class Negamax_AB(Actions): BEST_MOVE = None def __init__(self): Actions.__init__(self) def __deepcopy__(self, memodict={}):", "return True elif not black_pieces: print \"White has won\" return True return False", "Actions class Negamax_AB(Actions): BEST_MOVE = None def __init__(self): Actions.__init__(self) def __deepcopy__(self, memodict={}): #", "has won\" return True return False def negamax(self, board, depth, alpha, beta, color):", "v def evaluate(self, board): # for AI # sum(my pieces) - sum(oponent pieces)", "False def negamax(self, board, depth, alpha, beta, color): if depth <= 0 or", "return True return False def negamax(self, board, depth, color): if depth == 0", "__deepcopy__(self, memodict={}): # faster than built-in g = cPickle.loads(cPickle.dumps(self, -1)) return g def", "AI # sum(my pieces) - sum(oponent pieces) return len(self.get_black_pieces(board)) - len(self.get_white_pieces(board)) class Negamax(Actions):", "return color * 
self.evaluate(board) moves = self.generate_black_moves(board) for move in moves: child =", "child = deepcopy(board) self.move_black(child, *move) v = max(v, -self.negamax(child, depth - 1, -color))", "-color) if score >= alpha: alpha = score self.BEST_MOVE = move if alpha", "import deepcopy import cPickle from Board import Actions class Negamax_AB(Actions): BEST_MOVE = None", "faster than built-in g = cPickle.loads(cPickle.dumps(self, -1)) return g def game_over(self, board): white_pieces", "self.move_black(child, *move) score = -self.negamax(child, depth - 1, -beta, -alpha, -color) if score", "= score self.BEST_MOVE = move if alpha >= beta: break return alpha def", "* self.evaluate(board) moves = self.generate_black_moves(board) for move in moves: child = deepcopy(board) self.move_black(child,", "beta, color): if depth <= 0 or self.game_over(board): return color * self.evaluate(board) moves", "__init__(self): Actions.__init__(self) def __deepcopy__(self, memodict={}): # faster than built-in g = cPickle.loads(cPickle.dumps(self, -1))", "has won\" return True elif not black_pieces: print \"White has won\" return True", "= self.get_black_pieces(board) if not white_pieces: print \"Black has won\" return True elif not", "evaluate(self, board): # for AI # sum(my pieces) - sum(oponent pieces) return len(self.get_black_pieces(board))", "depth == 0 or self.game_over(board): return color * self.evaluate(board) v = float('-inf') moves", "deepcopy(board) self.move_black(child, *move) score = -self.negamax(child, depth - 1, -beta, -alpha, -color) if", "-self.negamax(child, depth - 1, -color)) self.BEST_MOVE = move return v def evaluate(self, board):", "*move) score = -self.negamax(child, depth - 1, -beta, -alpha, -color) if score >=", "0 or self.game_over(board): return color * self.evaluate(board) v = float('-inf') moves = self.generate_black_moves(board)", "- 1, -beta, -alpha, -color) if score >= alpha: alpha = score self.BEST_MOVE", "score = -self.negamax(child, depth - 1, 
-beta, -alpha, -color) if score >= alpha:", "float('-inf') moves = self.generate_black_moves(board) self.BEST_MOVE = moves[0] for move in moves: child =", "= deepcopy(board) self.move_black(child, *move) v = max(v, -self.negamax(child, depth - 1, -color)) self.BEST_MOVE", "= cPickle.loads(cPickle.dumps(self, -1)) return g def game_over(self, board): white_pieces = self.get_white_pieces(board) black_pieces =", "board, depth, alpha, beta, color): if depth <= 0 or self.game_over(board): return color", "self.BEST_MOVE = move return v def evaluate(self, board): # for AI # sum(my", "board): white_pieces = self.get_white_pieces(board) black_pieces = self.get_black_pieces(board) if not white_pieces: print \"Black has", "board, depth, color): if depth == 0 or self.game_over(board): return color * self.evaluate(board)", "\"White has won\" return True return False def negamax(self, board, depth, alpha, beta,", "self.generate_black_moves(board) self.BEST_MOVE = moves[0] for move in moves: child = deepcopy(board) self.move_black(child, *move)", "- 1, -color)) self.BEST_MOVE = move return v def evaluate(self, board): # for", "alpha = score self.BEST_MOVE = move if alpha >= beta: break return alpha", "won\" return True return False def negamax(self, board, depth, alpha, beta, color): if", "depth, alpha, beta, color): if depth <= 0 or self.game_over(board): return color *", "depth - 1, -color)) self.BEST_MOVE = move return v def evaluate(self, board): #", "from copy import deepcopy import cPickle from Board import Actions class Negamax_AB(Actions): BEST_MOVE", "white_pieces = self.get_white_pieces(board) black_pieces = self.get_black_pieces(board) if not white_pieces: print \"Black has won\"", "alpha, beta, color): if depth <= 0 or self.game_over(board): return color * self.evaluate(board)", "self.evaluate(board) moves = self.generate_black_moves(board) for move in moves: child = deepcopy(board) self.move_black(child, *move)", "- len(self.get_white_pieces(board)) class 
Negamax(Actions): BEST_MOVE = None def __init__(self): Actions.__init__(self) def __deepcopy__(self, memodict={}):", "len(self.get_white_pieces(board)) class Negamax(Actions): BEST_MOVE = None def __init__(self): Actions.__init__(self) def __deepcopy__(self, memodict={}): #", "# for AI # sum(my pieces) - sum(oponent pieces) return len(self.get_black_pieces(board)) - len(self.get_white_pieces(board))", "<= 0 or self.game_over(board): return color * self.evaluate(board) moves = self.generate_black_moves(board) for move", "than built-in g = cPickle.loads(cPickle.dumps(self, -1)) return g def game_over(self, board): white_pieces =", "alpha: alpha = score self.BEST_MOVE = move if alpha >= beta: break return", "g = cPickle.loads(cPickle.dumps(self, -1)) return g def game_over(self, board): white_pieces = self.get_white_pieces(board) black_pieces", "black_pieces: print \"White has won\" return True return False def negamax(self, board, depth,", "0 or self.game_over(board): return color * self.evaluate(board) moves = self.generate_black_moves(board) for move in", "= deepcopy(board) self.move_black(child, *move) score = -self.negamax(child, depth - 1, -beta, -alpha, -color)", "def evaluate(self, board): # for AI # sum(my pieces) - sum(oponent pieces) return", "max(v, -self.negamax(child, depth - 1, -color)) self.BEST_MOVE = move return v def evaluate(self,", "def __deepcopy__(self, memodict={}): # faster than built-in g = cPickle.loads(cPickle.dumps(self, -1)) return g", "return len(self.get_black_pieces(board)) - len(self.get_white_pieces(board)) class Negamax(Actions): BEST_MOVE = None def __init__(self): Actions.__init__(self) def", "self.generate_black_moves(board) for move in moves: child = deepcopy(board) self.move_black(child, *move) score = -self.negamax(child,", "not white_pieces: print \"Black has won\" return True elif not black_pieces: print \"White", "v = max(v, -self.negamax(child, depth - 1, -color)) self.BEST_MOVE = move return v", "move if alpha >= beta: 
break return alpha def evaluate(self, board): # for", "white_pieces: print \"Black has won\" return True elif not black_pieces: print \"White has", "in moves: child = deepcopy(board) self.move_black(child, *move) v = max(v, -self.negamax(child, depth -", "child = deepcopy(board) self.move_black(child, *move) score = -self.negamax(child, depth - 1, -beta, -alpha,", "True return False def negamax(self, board, depth, alpha, beta, color): if depth <=", "if depth <= 0 or self.game_over(board): return color * self.evaluate(board) moves = self.generate_black_moves(board)", "pieces) - sum(oponent pieces) return len(self.get_black_pieces(board)) - len(self.get_white_pieces(board)) class Negamax(Actions): BEST_MOVE = None", "or self.game_over(board): return color * self.evaluate(board) moves = self.generate_black_moves(board) for move in moves:", "class Negamax_AB(Actions): BEST_MOVE = None def __init__(self): Actions.__init__(self) def __deepcopy__(self, memodict={}): # faster", ">= alpha: alpha = score self.BEST_MOVE = move if alpha >= beta: break", "1, -beta, -alpha, -color) if score >= alpha: alpha = score self.BEST_MOVE =", "score self.BEST_MOVE = move if alpha >= beta: break return alpha def evaluate(self,", "Board import Actions class Negamax_AB(Actions): BEST_MOVE = None def __init__(self): Actions.__init__(self) def __deepcopy__(self,", "True return False def negamax(self, board, depth, color): if depth == 0 or", "if not white_pieces: print \"Black has won\" return True elif not black_pieces: print", "self.get_white_pieces(board) black_pieces = self.get_black_pieces(board) if not white_pieces: print \"Black has won\" return True", "# sum(my pieces) - sum(oponent pieces) return len(self.get_black_pieces(board)) - len(self.get_white_pieces(board)) class Negamax(Actions): BEST_MOVE", "False def negamax(self, board, depth, color): if depth == 0 or self.game_over(board): return", "beta: break return alpha def evaluate(self, board): # for AI # sum(my pieces)", "== 0 or 
self.game_over(board): return color * self.evaluate(board) v = float('-inf') moves =", "BEST_MOVE = None def __init__(self): Actions.__init__(self) def __deepcopy__(self, memodict={}): # faster than built-in", "for move in moves: child = deepcopy(board) self.move_black(child, *move) v = max(v, -self.negamax(child,", "elif not black_pieces: print \"White has won\" return True return False def negamax(self,", "= move if alpha >= beta: break return alpha def evaluate(self, board): #", "= -self.negamax(child, depth - 1, -beta, -alpha, -color) if score >= alpha: alpha", "sum(my pieces) - sum(oponent pieces) return len(self.get_black_pieces(board)) - len(self.get_white_pieces(board)) class Negamax(Actions): BEST_MOVE =", "moves = self.generate_black_moves(board) for move in moves: child = deepcopy(board) self.move_black(child, *move) score", "if alpha >= beta: break return alpha def evaluate(self, board): # for AI", "return g def game_over(self, board): white_pieces = self.get_white_pieces(board) black_pieces = self.get_black_pieces(board) if not", "color * self.evaluate(board) v = float('-inf') moves = self.generate_black_moves(board) self.BEST_MOVE = moves[0] for", "= moves[0] for move in moves: child = deepcopy(board) self.move_black(child, *move) v =", "depth, color): if depth == 0 or self.game_over(board): return color * self.evaluate(board) v", "v = float('-inf') moves = self.generate_black_moves(board) self.BEST_MOVE = moves[0] for move in moves:", "= float('-inf') moves = self.generate_black_moves(board) self.BEST_MOVE = moves[0] for move in moves: child", "-1)) return g def game_over(self, board): white_pieces = self.get_white_pieces(board) black_pieces = self.get_black_pieces(board) if", "Negamax(Actions): BEST_MOVE = None def __init__(self): Actions.__init__(self) def __deepcopy__(self, memodict={}): # faster than", "1, -color)) self.BEST_MOVE = move return v def evaluate(self, board): # for AI", "return False def negamax(self, board, depth, alpha, beta, 
color): if depth <= 0", "for AI # sum(my pieces) - sum(oponent pieces) return len(self.get_black_pieces(board)) - len(self.get_white_pieces(board)) class", "g def game_over(self, board): white_pieces = self.get_white_pieces(board) black_pieces = self.get_black_pieces(board) if not white_pieces:", "-color)) self.BEST_MOVE = move return v def evaluate(self, board): # for AI #", "= None def __init__(self): Actions.__init__(self) def __deepcopy__(self, memodict={}): # faster than built-in g", "black_pieces = self.get_black_pieces(board) if not white_pieces: print \"Black has won\" return True elif", "self.move_black(child, *move) v = max(v, -self.negamax(child, depth - 1, -color)) self.BEST_MOVE = move", "return False def negamax(self, board, depth, color): if depth == 0 or self.game_over(board):", "built-in g = cPickle.loads(cPickle.dumps(self, -1)) return g def game_over(self, board): white_pieces = self.get_white_pieces(board)", "print \"Black has won\" return True elif not black_pieces: print \"White has won\"", "print \"White has won\" return True return False def negamax(self, board, depth, color):", "return color * self.evaluate(board) v = float('-inf') moves = self.generate_black_moves(board) self.BEST_MOVE = moves[0]", "moves: child = deepcopy(board) self.move_black(child, *move) score = -self.negamax(child, depth - 1, -beta,", "\"Black has won\" return True elif not black_pieces: print \"White has won\" return", "- sum(oponent pieces) return len(self.get_black_pieces(board)) - len(self.get_white_pieces(board)) class Negamax(Actions): BEST_MOVE = None def", "deepcopy import cPickle from Board import Actions class Negamax_AB(Actions): BEST_MOVE = None def", "depth - 1, -beta, -alpha, -color) if score >= alpha: alpha = score", "= max(v, -self.negamax(child, depth - 1, -color)) self.BEST_MOVE = move return v def", "len(self.get_black_pieces(board)) - len(self.get_white_pieces(board)) class Negamax(Actions): BEST_MOVE = None def __init__(self): 
Actions.__init__(self) def __deepcopy__(self,", "negamax(self, board, depth, color): if depth == 0 or self.game_over(board): return color *", "= self.get_white_pieces(board) black_pieces = self.get_black_pieces(board) if not white_pieces: print \"Black has won\" return", "\"White has won\" return True return False def negamax(self, board, depth, color): if", "self.get_black_pieces(board) if not white_pieces: print \"Black has won\" return True elif not black_pieces:", "def game_over(self, board): white_pieces = self.get_white_pieces(board) black_pieces = self.get_black_pieces(board) if not white_pieces: print", "has won\" return True return False def negamax(self, board, depth, color): if depth", "memodict={}): # faster than built-in g = cPickle.loads(cPickle.dumps(self, -1)) return g def game_over(self,", "return True return False def negamax(self, board, depth, alpha, beta, color): if depth", "move in moves: child = deepcopy(board) self.move_black(child, *move) v = max(v, -self.negamax(child, depth", "self.game_over(board): return color * self.evaluate(board) moves = self.generate_black_moves(board) for move in moves: child", "class Negamax(Actions): BEST_MOVE = None def __init__(self): Actions.__init__(self) def __deepcopy__(self, memodict={}): # faster", "from Board import Actions class Negamax_AB(Actions): BEST_MOVE = None def __init__(self): Actions.__init__(self) def", "negamax(self, board, depth, alpha, beta, color): if depth <= 0 or self.game_over(board): return", "color * self.evaluate(board) moves = self.generate_black_moves(board) for move in moves: child = deepcopy(board)", "def __init__(self): Actions.__init__(self) def __deepcopy__(self, memodict={}): # faster than built-in g = cPickle.loads(cPickle.dumps(self,", "self.game_over(board): return color * self.evaluate(board) v = float('-inf') moves = self.generate_black_moves(board) self.BEST_MOVE =", "<filename>src/Negamax.py from copy import deepcopy import cPickle from Board import Actions class 
Negamax_AB(Actions):", "score >= alpha: alpha = score self.BEST_MOVE = move if alpha >= beta:", "alpha >= beta: break return alpha def evaluate(self, board): # for AI #", "True elif not black_pieces: print \"White has won\" return True return False def", "not black_pieces: print \"White has won\" return True return False def negamax(self, board,", ">= beta: break return alpha def evaluate(self, board): # for AI # sum(my", "pieces) return len(self.get_black_pieces(board)) - len(self.get_white_pieces(board)) class Negamax(Actions): BEST_MOVE = None def __init__(self): Actions.__init__(self)", "depth <= 0 or self.game_over(board): return color * self.evaluate(board) moves = self.generate_black_moves(board) for", "if score >= alpha: alpha = score self.BEST_MOVE = move if alpha >=", "= self.generate_black_moves(board) self.BEST_MOVE = moves[0] for move in moves: child = deepcopy(board) self.move_black(child,", "in moves: child = deepcopy(board) self.move_black(child, *move) score = -self.negamax(child, depth - 1,", "= self.generate_black_moves(board) for move in moves: child = deepcopy(board) self.move_black(child, *move) score =", "# faster than built-in g = cPickle.loads(cPickle.dumps(self, -1)) return g def game_over(self, board):", "*move) v = max(v, -self.negamax(child, depth - 1, -color)) self.BEST_MOVE = move return", "game_over(self, board): white_pieces = self.get_white_pieces(board) black_pieces = self.get_black_pieces(board) if not white_pieces: print \"Black", "break return alpha def evaluate(self, board): # for AI # sum(my pieces) -", "moves[0] for move in moves: child = deepcopy(board) self.move_black(child, *move) v = max(v,", "alpha def evaluate(self, board): # for AI # sum(my pieces) - sum(oponent pieces)", "sum(oponent pieces) return len(self.get_black_pieces(board)) - len(self.get_white_pieces(board)) class Negamax(Actions): BEST_MOVE = None def __init__(self):", "won\" return True return False def negamax(self, board, depth, color): if depth ==", 
"return v def evaluate(self, board): # for AI # sum(my pieces) - sum(oponent", "Actions.__init__(self) def __deepcopy__(self, memodict={}): # faster than built-in g = cPickle.loads(cPickle.dumps(self, -1)) return", "board): # for AI # sum(my pieces) - sum(oponent pieces) return len(self.get_black_pieces(board)) -" ]
[ "ugly[-1]: i5 += 1 ugly.append(min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5))", "{integer} def nthUglyNumber(self, n): ugly_number = 0 heap = [] heapq.heappush(heap, 1) for", "ugly = [1] i2 = i3 = i5 = 0 while len(ugly) <", "0 == 30**30 % num # V2 # Time: O(n) # Space: O(1)", "[1] i2 = i3 = i5 = 0 while len(ugly) < n: while", "class Solution2(object): ugly = sorted(2**a * 3**b * 5**c for a in range(32)", "= 0 while len(ugly) < n: while ugly[i2] * 2 <= ugly[-1]: i2", "i2 = i3 = i5 = 0 while len(ugly) < n: while ugly[i2]", "3 == 0: heapq.heappush(heap, ugly_number * 2) heapq.heappush(heap, ugly_number * 3) else: heapq.heappush(heap,", "Solution2(object): ugly = sorted(2**a * 3**b * 5**c for a in range(32) for", "return False if num == 1: return True if num % 2 ==", "number as well # IDEA : ITERATION class Solution(object): def isUgly(self, num): \"\"\"", "int :rtype: bool \"\"\" return num > 0 == 30**30 % num #", "< n: while ugly[i2] * 2 <= ugly[-1]: i2 += 1 while ugly[i3]", "2) heapq.heappush(heap, ugly_number * 3) heapq.heappush(heap, ugly_number * 5) return ugly_number def nthUglyNumber2(self,", "[2], [3], [5] ugly = 1 for u in heapq.merge(q2, q3, q5): if", "# https://blog.csdn.net/coder_orz/article/details/51317748 class Solution(object): def isUgly(self, num): \"\"\" :type num: int :rtype: bool", "3 <= ugly[-1]: i3 += 1 while ugly[i5] * 5 <= ugly[-1]: i5", "q5 += 5 * u, class Solution2(object): ugly = sorted(2**a * 3**b *", "+= 1 while ugly[i3] * 3 <= ugly[-1]: i3 += 1 while ugly[i5]", "n): ugly = [1] i2 = i3 = i5 = 0 while len(ugly)", "heap = [] heapq.heappush(heap, 1) for _ in range(n): ugly_number = heapq.heappop(heap) if", "2 <= ugly[-1]: i2 += 1 while ugly[i3] * 3 <= ugly[-1]: i3", "= [1] i2 = i3 = i5 = 0 while len(ugly) < n:", "2 == 0: heapq.heappush(heap, ugly_number * 2) elif ugly_number % 3 == 0:", "num%i == 0: num = num / i return True if num ==", "O(1) import heapq class Solution(object): # @param {integer} n # @return {integer} def", "number is an 
ugly number # if all its prime factors are within", "V0 # V1 # https://blog.csdn.net/coder_orz/article/details/51317748 # IDEA : \"unly number\" : a number", ":type num: int :rtype: bool \"\"\" if num <= 0: return False for", "== 1 else False # V1' # https://blog.csdn.net/coder_orz/article/details/51317748 # IDEA : RECURSION class", "prime factors are within [2, 3, 5]. # e.g. 6, 8 are ugly", "# https://blog.csdn.net/coder_orz/article/details/51317748 # IDEA : \"unly number\" : a number is an ugly", "ugly if u > ugly: ugly = u n -= 1 q2 +=", "# if all its prime factors are within [2, 3, 5]. # e.g.", "False if num == 1: return True if num % 2 == 0:", "heapq.heappush(heap, ugly_number * 5) return ugly_number def nthUglyNumber2(self, n): ugly = [1] i2", "in range(32) for b in range(20) for c in range(14)) def nthUglyNumber(self, n):", "* 3, ugly[i5] * 5)) return ugly[-1] def nthUglyNumber3(self, n): q2, q3, q5", "elif num % 3 == 0: return self.isUgly(num/3) elif num % 5 ==", "% 3 == 0: heapq.heappush(heap, ugly_number * 2) heapq.heappush(heap, ugly_number * 3) else:", "else: heapq.heappush(heap, ugly_number * 2) heapq.heappush(heap, ugly_number * 3) heapq.heappush(heap, ugly_number * 5)", "heapq class Solution(object): # @param {integer} n # @return {integer} def nthUglyNumber(self, n):", "ugly = 1 for u in heapq.merge(q2, q3, q5): if n == 1:", "if num <= 0: return False for i in [2, 3, 5]: while", "https://blog.csdn.net/coder_orz/article/details/51317748 # IDEA : \"unly number\" : a number is an ugly number", "if num == 1 else False # V1' # https://blog.csdn.net/coder_orz/article/details/51317748 # IDEA :", "3 == 0: return self.isUgly(num/3) elif num % 5 == 0: return self.isUgly(num/5)", "e.g. 
6, 8 are ugly number ; while 14 is not # please", "return True if num == 1 else False # V1' # https://blog.csdn.net/coder_orz/article/details/51317748 #", "num <= 0: return False if num == 1: return True if num", "1: return ugly if u > ugly: ugly = u n -= 1", "2 == 0: return self.isUgly(num/2) elif num % 3 == 0: return self.isUgly(num/3)", "\"\"\" :type num: int :rtype: bool \"\"\" return num > 0 == 30**30", "if num == 1: return True if num % 2 == 0: return", "2) heapq.heappush(heap, ugly_number * 3) else: heapq.heappush(heap, ugly_number * 2) heapq.heappush(heap, ugly_number *", "heapq.heappush(heap, 1) for _ in range(n): ugly_number = heapq.heappop(heap) if ugly_number % 2", "* 5) return ugly_number def nthUglyNumber2(self, n): ugly = [1] i2 = i3", "bool \"\"\" if num <= 0: return False for i in [2, 3,", "within [2, 3, 5]. # e.g. 6, 8 are ugly number ; while", "== 0: return self.isUgly(num/3) elif num % 5 == 0: return self.isUgly(num/5) else:", "0: return self.isUgly(num/2) elif num % 3 == 0: return self.isUgly(num/3) elif num", "ugly[i5] * 5 <= ugly[-1]: i5 += 1 ugly.append(min(ugly[i2] * 2, ugly[i3] *", "self.isUgly(num/3) elif num % 5 == 0: return self.isUgly(num/5) else: return False #", "-= 1 q2 += 2 * u, q3 += 3 * u, q5", ":type num: int :rtype: bool \"\"\" return num > 0 == 30**30 %", "heapq.heappush(heap, ugly_number * 2) heapq.heappush(heap, ugly_number * 3) else: heapq.heappush(heap, ugly_number * 2)", "# V1' # https://blog.csdn.net/coder_orz/article/details/51317748 # IDEA : RECURSION class Solution(object): def isUgly(self, num):", "def nthUglyNumber2(self, n): ugly = [1] i2 = i3 = i5 = 0", ": a number is an ugly number # if all its prime factors", "/ i return True if num == 1 else False # V1' #", "== 0: num = num / i return True if num == 1", "V1 # https://blog.csdn.net/coder_orz/article/details/51317748 # IDEA : \"unly number\" : a number is an", "in [2, 3, 5]: while num%i == 0: num = num / i", "% num # V2 # Time: O(n) # Space: O(1) import heapq 
class", "range(n): ugly_number = heapq.heappop(heap) if ugly_number % 2 == 0: heapq.heappush(heap, ugly_number *", "num == 1: return True if num % 2 == 0: return self.isUgly(num/2)", "* 2, ugly[i3] * 3, ugly[i5] * 5)) return ugly[-1] def nthUglyNumber3(self, n):", "== 0: return self.isUgly(num/5) else: return False # V1'' # https://blog.csdn.net/coder_orz/article/details/51317748 class Solution(object):", "i5 += 1 ugly.append(min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5)) return", "* 2) elif ugly_number % 3 == 0: heapq.heappush(heap, ugly_number * 2) heapq.heappush(heap,", "class Solution(object): def isUgly(self, num): \"\"\" :type num: int :rtype: bool \"\"\" return", "if n == 1: return ugly if u > ugly: ugly = u", "all its prime factors are within [2, 3, 5]. # e.g. 6, 8", "2 * u, q3 += 3 * u, q5 += 5 * u,", "q3 += 3 * u, q5 += 5 * u, class Solution2(object): ugly", "ugly_number = 0 heap = [] heapq.heappush(heap, 1) for _ in range(n): ugly_number", "# e.g. 6, 8 are ugly number ; while 14 is not #", "# IDEA : ITERATION class Solution(object): def isUgly(self, num): \"\"\" :type num: int", "ugly.append(min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5)) return ugly[-1] def nthUglyNumber3(self,", "5**c for a in range(32) for b in range(20) for c in range(14))", "= [2], [3], [5] ugly = 1 for u in heapq.merge(q2, q3, q5):", "if num % 2 == 0: return self.isUgly(num/2) elif num % 3 ==", "len(ugly) < n: while ugly[i2] * 2 <= ugly[-1]: i2 += 1 while", "self.isUgly(num/2) elif num % 3 == 0: return self.isUgly(num/3) elif num % 5", "0: heapq.heappush(heap, ugly_number * 2) heapq.heappush(heap, ugly_number * 3) else: heapq.heappush(heap, ugly_number *", "num == 1 else False # V1' # https://blog.csdn.net/coder_orz/article/details/51317748 # IDEA : RECURSION", "ugly: ugly = u n -= 1 q2 += 2 * u, q3", "bool \"\"\" if num <= 0: return False if num == 1: return", "heapq.heappush(heap, ugly_number * 3) heapq.heappush(heap, ugly_number * 5) return ugly_number def nthUglyNumber2(self, 
n):", "Solution(object): # @param {integer} n # @return {integer} def nthUglyNumber(self, n): ugly_number =", "0: return self.isUgly(num/5) else: return False # V1'' # https://blog.csdn.net/coder_orz/article/details/51317748 class Solution(object): def", "\"\"\" :type num: int :rtype: bool \"\"\" if num <= 0: return False", "IDEA : RECURSION class Solution(object): def isUgly(self, num): \"\"\" :type num: int :rtype:", "# Time: O(n) # Space: O(1) import heapq class Solution(object): # @param {integer}", "+= 1 while ugly[i5] * 5 <= ugly[-1]: i5 += 1 ugly.append(min(ugly[i2] *", "3, 5]: while num%i == 0: num = num / i return True", "return False # V1'' # https://blog.csdn.net/coder_orz/article/details/51317748 class Solution(object): def isUgly(self, num): \"\"\" :type", "for a in range(32) for b in range(20) for c in range(14)) def", "* 3) else: heapq.heappush(heap, ugly_number * 2) heapq.heappush(heap, ugly_number * 3) heapq.heappush(heap, ugly_number", ": \"unly number\" : a number is an ugly number # if all", "> 0 == 30**30 % num # V2 # Time: O(n) # Space:", "elif ugly_number % 3 == 0: heapq.heappush(heap, ugly_number * 2) heapq.heappush(heap, ugly_number *", "while num%i == 0: num = num / i return True if num", "return ugly[-1] def nthUglyNumber3(self, n): q2, q3, q5 = [2], [3], [5] ugly", ": RECURSION class Solution(object): def isUgly(self, num): \"\"\" :type num: int :rtype: bool", "8 are ugly number ; while 14 is not # please note that", "% 2 == 0: return self.isUgly(num/2) elif num % 3 == 0: return", "range(32) for b in range(20) for c in range(14)) def nthUglyNumber(self, n): return", "n == 1: return ugly if u > ugly: ugly = u n", "5 == 0: return self.isUgly(num/5) else: return False # V1'' # https://blog.csdn.net/coder_orz/article/details/51317748 class", "its prime factors are within [2, 3, 5]. # e.g. 
6, 8 are", "num): \"\"\" :type num: int :rtype: bool \"\"\" return num > 0 ==", "https://blog.csdn.net/coder_orz/article/details/51317748 # IDEA : RECURSION class Solution(object): def isUgly(self, num): \"\"\" :type num:", "<= ugly[-1]: i3 += 1 while ugly[i5] * 5 <= ugly[-1]: i5 +=", "0: num = num / i return True if num == 1 else", "* 5**c for a in range(32) for b in range(20) for c in", "return True if num % 2 == 0: return self.isUgly(num/2) elif num %", "@param {integer} n # @return {integer} def nthUglyNumber(self, n): ugly_number = 0 heap", "3, ugly[i5] * 5)) return ugly[-1] def nthUglyNumber3(self, n): q2, q3, q5 =", "> ugly: ugly = u n -= 1 q2 += 2 * u,", "n: while ugly[i2] * 2 <= ugly[-1]: i2 += 1 while ugly[i3] *", "<= 0: return False for i in [2, 3, 5]: while num%i ==", "# please note that 1 is ugly number as well # IDEA :", "@return {integer} def nthUglyNumber(self, n): ugly_number = 0 heap = [] heapq.heappush(heap, 1)", "= heapq.heappop(heap) if ugly_number % 2 == 0: heapq.heappush(heap, ugly_number * 2) elif", "# @return {integer} def nthUglyNumber(self, n): ugly_number = 0 heap = [] heapq.heappush(heap,", "u n -= 1 q2 += 2 * u, q3 += 3 *", "nthUglyNumber2(self, n): ugly = [1] i2 = i3 = i5 = 0 while", "5)) return ugly[-1] def nthUglyNumber3(self, n): q2, q3, q5 = [2], [3], [5]", "3) heapq.heappush(heap, ugly_number * 5) return ugly_number def nthUglyNumber2(self, n): ugly = [1]", "number\" : a number is an ugly number # if all its prime", "class Solution(object): def isUgly(self, num): \"\"\" :type num: int :rtype: bool \"\"\" if", "is not # please note that 1 is ugly number as well #", "while 14 is not # please note that 1 is ugly number as", "; while 14 is not # please note that 1 is ugly number", "<= ugly[-1]: i2 += 1 while ugly[i3] * 3 <= ugly[-1]: i3 +=", "u, q5 += 5 * u, class Solution2(object): ugly = sorted(2**a * 3**b", "2, ugly[i3] * 3, ugly[i5] * 5)) return ugly[-1] def nthUglyNumber3(self, n): q2,", "ugly_number * 3) else: 
heapq.heappush(heap, ugly_number * 2) heapq.heappush(heap, ugly_number * 3) heapq.heappush(heap,", "\"unly number\" : a number is an ugly number # if all its", "q2, q3, q5 = [2], [3], [5] ugly = 1 for u in", "factors are within [2, 3, 5]. # e.g. 6, 8 are ugly number", "num % 5 == 0: return self.isUgly(num/5) else: return False # V1'' #", "if ugly_number % 2 == 0: heapq.heappush(heap, ugly_number * 2) elif ugly_number %", "bool \"\"\" return num > 0 == 30**30 % num # V2 #", "ugly[i3] * 3 <= ugly[-1]: i3 += 1 while ugly[i5] * 5 <=", "num: int :rtype: bool \"\"\" if num <= 0: return False for i", "elif num % 5 == 0: return self.isUgly(num/5) else: return False # V1''", "False # V1'' # https://blog.csdn.net/coder_orz/article/details/51317748 class Solution(object): def isUgly(self, num): \"\"\" :type num:", "if num <= 0: return False if num == 1: return True if", "ugly_number % 3 == 0: heapq.heappush(heap, ugly_number * 2) heapq.heappush(heap, ugly_number * 3)", "2) elif ugly_number % 3 == 0: heapq.heappush(heap, ugly_number * 2) heapq.heappush(heap, ugly_number", "isUgly(self, num): \"\"\" :type num: int :rtype: bool \"\"\" if num <= 0:", "return ugly if u > ugly: ugly = u n -= 1 q2", "3**b * 5**c for a in range(32) for b in range(20) for c", "<= ugly[-1]: i5 += 1 ugly.append(min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] *", "ugly_number * 3) heapq.heappush(heap, ugly_number * 5) return ugly_number def nthUglyNumber2(self, n): ugly", "num % 2 == 0: return self.isUgly(num/2) elif num % 3 == 0:", "* 5 <= ugly[-1]: i5 += 1 ugly.append(min(ugly[i2] * 2, ugly[i3] * 3,", ":rtype: bool \"\"\" if num <= 0: return False for i in [2,", "n # @return {integer} def nthUglyNumber(self, n): ugly_number = 0 heap = []", "= i3 = i5 = 0 while len(ugly) < n: while ugly[i2] *", "n -= 1 q2 += 2 * u, q3 += 3 * u,", "== 30**30 % num # V2 # Time: O(n) # Space: O(1) import", "please note that 1 is ugly number as well # IDEA : ITERATION", "= [] heapq.heappush(heap, 1) for _ in range(n): ugly_number 
= heapq.heappop(heap) if ugly_number", "* 2) heapq.heappush(heap, ugly_number * 3) else: heapq.heappush(heap, ugly_number * 2) heapq.heappush(heap, ugly_number", "i return True if num == 1 else False # V1' # https://blog.csdn.net/coder_orz/article/details/51317748", "* 3 <= ugly[-1]: i3 += 1 while ugly[i5] * 5 <= ugly[-1]:", "q5 = [2], [3], [5] ugly = 1 for u in heapq.merge(q2, q3,", "u, class Solution2(object): ugly = sorted(2**a * 3**b * 5**c for a in", "return ugly_number def nthUglyNumber2(self, n): ugly = [1] i2 = i3 = i5", "= u n -= 1 q2 += 2 * u, q3 += 3", "self.isUgly(num/5) else: return False # V1'' # https://blog.csdn.net/coder_orz/article/details/51317748 class Solution(object): def isUgly(self, num):", "else False # V1' # https://blog.csdn.net/coder_orz/article/details/51317748 # IDEA : RECURSION class Solution(object): def", "1 q2 += 2 * u, q3 += 3 * u, q5 +=", "return self.isUgly(num/2) elif num % 3 == 0: return self.isUgly(num/3) elif num %", "i5 = 0 while len(ugly) < n: while ugly[i2] * 2 <= ugly[-1]:", "a number is an ugly number # if all its prime factors are", "ugly number ; while 14 is not # please note that 1 is", "note that 1 is ugly number as well # IDEA : ITERATION class", "num > 0 == 30**30 % num # V2 # Time: O(n) #", "ugly number # if all its prime factors are within [2, 3, 5].", "num): \"\"\" :type num: int :rtype: bool \"\"\" if num <= 0: return", "ugly = u n -= 1 q2 += 2 * u, q3 +=", "<= 0: return False if num == 1: return True if num %", "1 for u in heapq.merge(q2, q3, q5): if n == 1: return ugly", "* 3**b * 5**c for a in range(32) for b in range(20) for", "q3, q5): if n == 1: return ugly if u > ugly: ugly", "q5): if n == 1: return ugly if u > ugly: ugly =", "heapq.heappush(heap, ugly_number * 2) heapq.heappush(heap, ugly_number * 3) heapq.heappush(heap, ugly_number * 5) return", "ugly_number * 5) return ugly_number def nthUglyNumber2(self, n): ugly = [1] i2 =", "for i in [2, 3, 5]: while num%i == 0: num = num", "ugly[i3] * 3, ugly[i5] 
* 5)) return ugly[-1] def nthUglyNumber3(self, n): q2, q3,", "def nthUglyNumber3(self, n): q2, q3, q5 = [2], [3], [5] ugly = 1", "heapq.heappop(heap) if ugly_number % 2 == 0: heapq.heappush(heap, ugly_number * 2) elif ugly_number", "heapq.merge(q2, q3, q5): if n == 1: return ugly if u > ugly:", "5 <= ugly[-1]: i5 += 1 ugly.append(min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5]", "n): ugly_number = 0 heap = [] heapq.heappush(heap, 1) for _ in range(n):", "num % 3 == 0: return self.isUgly(num/3) elif num % 5 == 0:", "_ in range(n): ugly_number = heapq.heappop(heap) if ugly_number % 2 == 0: heapq.heappush(heap,", "1 is ugly number as well # IDEA : ITERATION class Solution(object): def", ": ITERATION class Solution(object): def isUgly(self, num): \"\"\" :type num: int :rtype: bool", "# V1 # https://blog.csdn.net/coder_orz/article/details/51317748 # IDEA : \"unly number\" : a number is", "V2 # Time: O(n) # Space: O(1) import heapq class Solution(object): # @param", "heapq.heappush(heap, ugly_number * 2) elif ugly_number % 3 == 0: heapq.heappush(heap, ugly_number *", "for _ in range(n): ugly_number = heapq.heappop(heap) if ugly_number % 2 == 0:", "1: return True if num % 2 == 0: return self.isUgly(num/2) elif num", "n): q2, q3, q5 = [2], [3], [5] ugly = 1 for u", "* 2) heapq.heappush(heap, ugly_number * 3) heapq.heappush(heap, ugly_number * 5) return ugly_number def", "0: return False for i in [2, 3, 5]: while num%i == 0:", "[2, 3, 5]: while num%i == 0: num = num / i return", "3, 5]. # e.g. 
6, 8 are ugly number ; while 14 is", "3) else: heapq.heappush(heap, ugly_number * 2) heapq.heappush(heap, ugly_number * 3) heapq.heappush(heap, ugly_number *", "sorted(2**a * 3**b * 5**c for a in range(32) for b in range(20)", "\"\"\" return num > 0 == 30**30 % num # V2 # Time:", "== 0: heapq.heappush(heap, ugly_number * 2) heapq.heappush(heap, ugly_number * 3) else: heapq.heappush(heap, ugly_number", "+= 1 ugly.append(min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5)) return ugly[-1]", "ugly_number * 2) heapq.heappush(heap, ugly_number * 3) else: heapq.heappush(heap, ugly_number * 2) heapq.heappush(heap,", "not # please note that 1 is ugly number as well # IDEA", "i3 = i5 = 0 while len(ugly) < n: while ugly[i2] * 2", "def nthUglyNumber(self, n): ugly_number = 0 heap = [] heapq.heappush(heap, 1) for _", "heapq.heappush(heap, ugly_number * 3) else: heapq.heappush(heap, ugly_number * 2) heapq.heappush(heap, ugly_number * 3)", "ugly = sorted(2**a * 3**b * 5**c for a in range(32) for b", "while len(ugly) < n: while ugly[i2] * 2 <= ugly[-1]: i2 += 1", "is ugly number as well # IDEA : ITERATION class Solution(object): def isUgly(self,", "+= 2 * u, q3 += 3 * u, q5 += 5 *", "num: int :rtype: bool \"\"\" return num > 0 == 30**30 % num", "Space: O(1) import heapq class Solution(object): # @param {integer} n # @return {integer}", "num <= 0: return False for i in [2, 3, 5]: while num%i", "num = num / i return True if num == 1 else False", "is an ugly number # if all its prime factors are within [2,", "1 else False # V1' # https://blog.csdn.net/coder_orz/article/details/51317748 # IDEA : RECURSION class Solution(object):", ":rtype: bool \"\"\" if num <= 0: return False if num == 1:", "# V2 # Time: O(n) # Space: O(1) import heapq class Solution(object): #", "ugly[i5] * 5)) return ugly[-1] def nthUglyNumber3(self, n): q2, q3, q5 = [2],", "ugly number as well # IDEA : ITERATION class Solution(object): def isUgly(self, num):", "# @param {integer} n # @return {integer} def 
nthUglyNumber(self, n): ugly_number = 0", "else: return False # V1'' # https://blog.csdn.net/coder_orz/article/details/51317748 class Solution(object): def isUgly(self, num): \"\"\"", "while ugly[i3] * 3 <= ugly[-1]: i3 += 1 while ugly[i5] * 5", "for u in heapq.merge(q2, q3, q5): if n == 1: return ugly if", "ugly_number def nthUglyNumber2(self, n): ugly = [1] i2 = i3 = i5 =", "+= 3 * u, q5 += 5 * u, class Solution2(object): ugly =", "0 heap = [] heapq.heappush(heap, 1) for _ in range(n): ugly_number = heapq.heappop(heap)", "IDEA : \"unly number\" : a number is an ugly number # if", "def isUgly(self, num): \"\"\" :type num: int :rtype: bool \"\"\" if num <=", "nthUglyNumber3(self, n): q2, q3, q5 = [2], [3], [5] ugly = 1 for", ":type num: int :rtype: bool \"\"\" if num <= 0: return False if", "ugly_number = heapq.heappop(heap) if ugly_number % 2 == 0: heapq.heappush(heap, ugly_number * 2)", "{integer} n # @return {integer} def nthUglyNumber(self, n): ugly_number = 0 heap =", "ugly[-1] def nthUglyNumber3(self, n): q2, q3, q5 = [2], [3], [5] ugly =", "0: return False if num == 1: return True if num % 2", "in heapq.merge(q2, q3, q5): if n == 1: return ugly if u >", "V1' # https://blog.csdn.net/coder_orz/article/details/51317748 # IDEA : RECURSION class Solution(object): def isUgly(self, num): \"\"\"", "return num > 0 == 30**30 % num # V2 # Time: O(n)", "i2 += 1 while ugly[i3] * 3 <= ugly[-1]: i3 += 1 while", "that 1 is ugly number as well # IDEA : ITERATION class Solution(object):", "% 5 == 0: return self.isUgly(num/5) else: return False # V1'' # https://blog.csdn.net/coder_orz/article/details/51317748", "= 0 heap = [] heapq.heappush(heap, 1) for _ in range(n): ugly_number =", "return False for i in [2, 3, 5]: while num%i == 0: num", "https://blog.csdn.net/coder_orz/article/details/51317748 class Solution(object): def isUgly(self, num): \"\"\" :type num: int :rtype: bool \"\"\"", "ITERATION class Solution(object): def isUgly(self, num): \"\"\" :type num: int :rtype: 
bool \"\"\"", "# IDEA : RECURSION class Solution(object): def isUgly(self, num): \"\"\" :type num: int", "[2, 3, 5]. # e.g. 6, 8 are ugly number ; while 14", "ugly_number % 2 == 0: heapq.heappush(heap, ugly_number * 2) elif ugly_number % 3", "O(n) # Space: O(1) import heapq class Solution(object): # @param {integer} n #", "3 * u, q5 += 5 * u, class Solution2(object): ugly = sorted(2**a", "a in range(32) for b in range(20) for c in range(14)) def nthUglyNumber(self,", "* 3) heapq.heappush(heap, ugly_number * 5) return ugly_number def nthUglyNumber2(self, n): ugly =", "14 is not # please note that 1 is ugly number as well", "5]: while num%i == 0: num = num / i return True if", "u in heapq.merge(q2, q3, q5): if n == 1: return ugly if u", "30**30 % num # V2 # Time: O(n) # Space: O(1) import heapq", "num # V2 # Time: O(n) # Space: O(1) import heapq class Solution(object):", "== 1: return ugly if u > ugly: ugly = u n -=", "\"\"\" if num <= 0: return False if num == 1: return True", "% 2 == 0: heapq.heappush(heap, ugly_number * 2) elif ugly_number % 3 ==", "True if num == 1 else False # V1' # https://blog.csdn.net/coder_orz/article/details/51317748 # IDEA", "* u, q5 += 5 * u, class Solution2(object): ugly = sorted(2**a *", "u, q3 += 3 * u, q5 += 5 * u, class Solution2(object):", "Time: O(n) # Space: O(1) import heapq class Solution(object): # @param {integer} n", "1 while ugly[i5] * 5 <= ugly[-1]: i5 += 1 ugly.append(min(ugly[i2] * 2,", "ugly[-1]: i3 += 1 while ugly[i5] * 5 <= ugly[-1]: i5 += 1", "[] heapq.heappush(heap, 1) for _ in range(n): ugly_number = heapq.heappop(heap) if ugly_number %", "# IDEA : \"unly number\" : a number is an ugly number #", "* 2 <= ugly[-1]: i2 += 1 while ugly[i3] * 3 <= ugly[-1]:", "= num / i return True if num == 1 else False #", "== 0: return self.isUgly(num/2) elif num % 3 == 0: return self.isUgly(num/3) elif", ":rtype: bool \"\"\" return num > 0 == 30**30 % num # V2", "Solution(object): def isUgly(self, num): \"\"\" :type num: int :rtype: 
bool \"\"\" return num", "+= 5 * u, class Solution2(object): ugly = sorted(2**a * 3**b * 5**c", "Solution(object): def isUgly(self, num): \"\"\" :type num: int :rtype: bool \"\"\" if num", "int :rtype: bool \"\"\" if num <= 0: return False if num ==", "[5] ugly = 1 for u in heapq.merge(q2, q3, q5): if n ==", "V1'' # https://blog.csdn.net/coder_orz/article/details/51317748 class Solution(object): def isUgly(self, num): \"\"\" :type num: int :rtype:", "import heapq class Solution(object): # @param {integer} n # @return {integer} def nthUglyNumber(self,", "q2 += 2 * u, q3 += 3 * u, q5 += 5", "ugly_number * 2) elif ugly_number % 3 == 0: heapq.heappush(heap, ugly_number * 2)", "False for i in [2, 3, 5]: while num%i == 0: num =", "# https://blog.csdn.net/coder_orz/article/details/51317748 # IDEA : RECURSION class Solution(object): def isUgly(self, num): \"\"\" :type", "num: int :rtype: bool \"\"\" if num <= 0: return False if num", "# Space: O(1) import heapq class Solution(object): # @param {integer} n # @return", "as well # IDEA : ITERATION class Solution(object): def isUgly(self, num): \"\"\" :type", "1 ugly.append(min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5)) return ugly[-1] def", "# V0 # V1 # https://blog.csdn.net/coder_orz/article/details/51317748 # IDEA : \"unly number\" : a", "an ugly number # if all its prime factors are within [2, 3,", "ugly[-1]: i2 += 1 while ugly[i3] * 3 <= ugly[-1]: i3 += 1", "6, 8 are ugly number ; while 14 is not # please note", "number ; while 14 is not # please note that 1 is ugly", "int :rtype: bool \"\"\" if num <= 0: return False for i in", "* u, q3 += 3 * u, q5 += 5 * u, class", "nthUglyNumber(self, n): ugly_number = 0 heap = [] heapq.heappush(heap, 1) for _ in", "% 3 == 0: return self.isUgly(num/3) elif num % 5 == 0: return", "well # IDEA : ITERATION class Solution(object): def isUgly(self, num): \"\"\" :type num:", "== 1: return True if num % 2 == 0: return self.isUgly(num/2) elif", "def isUgly(self, num): \"\"\" :type num: int 
:rtype: bool \"\"\" return num >", "IDEA : ITERATION class Solution(object): def isUgly(self, num): \"\"\" :type num: int :rtype:", "if all its prime factors are within [2, 3, 5]. # e.g. 6,", "while ugly[i2] * 2 <= ugly[-1]: i2 += 1 while ugly[i3] * 3", "i in [2, 3, 5]: while num%i == 0: num = num /", "are ugly number ; while 14 is not # please note that 1", "for b in range(20) for c in range(14)) def nthUglyNumber(self, n): return self.ugly[n-1]", "== 0: heapq.heappush(heap, ugly_number * 2) elif ugly_number % 3 == 0: heapq.heappush(heap,", "while ugly[i5] * 5 <= ugly[-1]: i5 += 1 ugly.append(min(ugly[i2] * 2, ugly[i3]", "i3 += 1 while ugly[i5] * 5 <= ugly[-1]: i5 += 1 ugly.append(min(ugly[i2]", "class Solution(object): # @param {integer} n # @return {integer} def nthUglyNumber(self, n): ugly_number", "\"\"\" if num <= 0: return False for i in [2, 3, 5]:", "True if num % 2 == 0: return self.isUgly(num/2) elif num % 3", "isUgly(self, num): \"\"\" :type num: int :rtype: bool \"\"\" return num > 0", "# V1'' # https://blog.csdn.net/coder_orz/article/details/51317748 class Solution(object): def isUgly(self, num): \"\"\" :type num: int", "RECURSION class Solution(object): def isUgly(self, num): \"\"\" :type num: int :rtype: bool \"\"\"", "5]. # e.g. 
6, 8 are ugly number ; while 14 is not", "5) return ugly_number def nthUglyNumber2(self, n): ugly = [1] i2 = i3 =", "ugly[i2] * 2 <= ugly[-1]: i2 += 1 while ugly[i3] * 3 <=", "q3, q5 = [2], [3], [5] ugly = 1 for u in heapq.merge(q2,", "if u > ugly: ugly = u n -= 1 q2 += 2", "1) for _ in range(n): ugly_number = heapq.heappop(heap) if ugly_number % 2 ==", "0 while len(ugly) < n: while ugly[i2] * 2 <= ugly[-1]: i2 +=", "in range(n): ugly_number = heapq.heappop(heap) if ugly_number % 2 == 0: heapq.heappush(heap, ugly_number", "* 5)) return ugly[-1] def nthUglyNumber3(self, n): q2, q3, q5 = [2], [3],", "num / i return True if num == 1 else False # V1'", "ugly_number * 2) heapq.heappush(heap, ugly_number * 3) heapq.heappush(heap, ugly_number * 5) return ugly_number", "= i5 = 0 while len(ugly) < n: while ugly[i2] * 2 <=", "1 while ugly[i3] * 3 <= ugly[-1]: i3 += 1 while ugly[i5] *", "[3], [5] ugly = 1 for u in heapq.merge(q2, q3, q5): if n", "return self.isUgly(num/5) else: return False # V1'' # https://blog.csdn.net/coder_orz/article/details/51317748 class Solution(object): def isUgly(self,", "5 * u, class Solution2(object): ugly = sorted(2**a * 3**b * 5**c for", "False # V1' # https://blog.csdn.net/coder_orz/article/details/51317748 # IDEA : RECURSION class Solution(object): def isUgly(self,", "u > ugly: ugly = u n -= 1 q2 += 2 *", "are within [2, 3, 5]. # e.g. 6, 8 are ugly number ;", "= sorted(2**a * 3**b * 5**c for a in range(32) for b in", "= 1 for u in heapq.merge(q2, q3, q5): if n == 1: return", "return self.isUgly(num/3) elif num % 5 == 0: return self.isUgly(num/5) else: return False", "* u, class Solution2(object): ugly = sorted(2**a * 3**b * 5**c for a", "0: heapq.heappush(heap, ugly_number * 2) elif ugly_number % 3 == 0: heapq.heappush(heap, ugly_number", "number # if all its prime factors are within [2, 3, 5]. #", "0: return self.isUgly(num/3) elif num % 5 == 0: return self.isUgly(num/5) else: return" ]
[ "dbcurs.fetchone() points.append((node[2], node[1])) center = None if len(points) == 2: line = LineString(points)", "= 5000 while offset < count : sql = \"SELECT * FROM ways", "lon, woeid, way_id)) dbconn.commit() time.sleep(2) offset += limit return if __name__ == '__main__'", "= dbconn.cursor() dbcurs.execute(\"SELECT COUNT(id) AS count FROM ways\") row = dbcurs.fetchone() count =", "while offset < count : sql = \"SELECT * FROM ways LIMIT %s,", "= rg.reverse_geocode(lat, lon) if geo: woeid = geo['woeid'] print \"[%s] update %s lat:", "if __name__ == '__main__' : path = sys.argv[1] reversegeo = sys.argv[2] munge(path, reversegeo)", "sql = \"SELECT * FROM ways LIMIT %s, %s\" % (offset, limit) print", "(%s)\" % (sql, count) dbcurs.execute(sql) for row in dbcurs.fetchall(): way_id, lat, lon, woeid,", "way_id, lat, lon, woeid, nodes, tags = row if lat and lon: pass", "lon=?, woeid=? WHERE id=?\", (lat, lon, woeid, way_id)) dbconn.commit() time.sleep(2) offset += limit", "shapely.geometry import Polygon from shapely.geometry import LineString def munge(path, reversegeo_endpoint) : # rg", "center: print \"no centroid for way %s\" % way_id print poly continue lat", "in nodes: dbcurs.execute(\"SELECT * FROM nodes WHERE id=?\", (node_id, )) node = dbcurs.fetchone()", "else : points.append(points[0]) poly = Polygon(points) center = poly.centroid if not center: print", "dbcurs.execute(sql) for row in dbcurs.fetchall(): way_id, lat, lon, woeid, nodes, tags = row", "= None if len(points) == 2: line = LineString(points) center = line.centroid else", "dbcurs.fetchone() count = row[0] offset = 0 limit = 5000 while offset <", "% (offset, limit) print \"%s (%s)\" % (sql, count) dbcurs.execute(sql) for row in", "\"[%s] update %s lat: %s, lon: %s, woeid: %s\" % (offset, way_id, lat,", "if lat and lon: pass # continue if woeid > 0: continue nodes", "= LineString(points) center = line.centroid else : points.append(points[0]) poly = Polygon(points) center =", "len(points) == 2: line = 
LineString(points) center = line.centroid else : points.append(points[0]) poly", "geo = rg.reverse_geocode(lat, lon) if geo: woeid = geo['woeid'] print \"[%s] update %s", "line = LineString(points) center = line.centroid else : points.append(points[0]) poly = Polygon(points) center", "continue lat = center.y lon = center.x woeid = 0 geo = rg.reverse_geocode(lat,", "= poly.centroid if not center: print \"no centroid for way %s\" % way_id", "5000 while offset < count : sql = \"SELECT * FROM ways LIMIT", "urllib2 import json import time import reverse_geoplanet from shapely.geometry import Polygon from shapely.geometry", "\"no centroid for way %s\" % way_id print poly continue lat = center.y", "dbconn = sqlite3.connect(path) dbcurs = dbconn.cursor() dbcurs.execute(\"SELECT COUNT(id) AS count FROM ways\") row", "%s\" % (offset, way_id, lat, lon, woeid) dbcurs.execute(\"UPDATE ways SET lat=?, lon=?, woeid=?", "import Polygon from shapely.geometry import LineString def munge(path, reversegeo_endpoint) : # rg =", "ways LIMIT %s, %s\" % (offset, limit) print \"%s (%s)\" % (sql, count)", "node_id in nodes: dbcurs.execute(\"SELECT * FROM nodes WHERE id=?\", (node_id, )) node =", "reversegeo_endpoint) : # rg = reverse_geoplanet.reverse_geoplanet(reversegeo_endpoint) dbconn = sqlite3.connect(path) dbcurs = dbconn.cursor() dbcurs.execute(\"SELECT", "lat=?, lon=?, woeid=? 
WHERE id=?\", (lat, lon, woeid, way_id)) dbconn.commit() time.sleep(2) offset +=", "dbcurs = dbconn.cursor() dbcurs.execute(\"SELECT COUNT(id) AS count FROM ways\") row = dbcurs.fetchone() count", "count FROM ways\") row = dbcurs.fetchone() count = row[0] offset = 0 limit", "way %s\" % way_id print poly continue lat = center.y lon = center.x", "dbcurs.execute(\"SELECT * FROM nodes WHERE id=?\", (node_id, )) node = dbcurs.fetchone() points.append((node[2], node[1]))", "LineString def munge(path, reversegeo_endpoint) : # rg = reverse_geoplanet.reverse_geoplanet(reversegeo_endpoint) dbconn = sqlite3.connect(path) dbcurs", "# rg = reverse_geoplanet.reverse_geoplanet(reversegeo_endpoint) dbconn = sqlite3.connect(path) dbcurs = dbconn.cursor() dbcurs.execute(\"SELECT COUNT(id) AS", "count) dbcurs.execute(sql) for row in dbcurs.fetchall(): way_id, lat, lon, woeid, nodes, tags =", "time.sleep(2) offset += limit return if __name__ == '__main__' : path = sys.argv[1]", "FROM ways\") row = dbcurs.fetchone() count = row[0] offset = 0 limit =", "limit return if __name__ == '__main__' : path = sys.argv[1] reversegeo = sys.argv[2]", "woeid > 0: continue nodes = nodes.split(',') points = [] for node_id in", "offset += limit return if __name__ == '__main__' : path = sys.argv[1] reversegeo", "import sqlite3 import urllib2 import json import time import reverse_geoplanet from shapely.geometry import", "lon: pass # continue if woeid > 0: continue nodes = nodes.split(',') points", "row[0] offset = 0 limit = 5000 while offset < count : sql", "if geo: woeid = geo['woeid'] print \"[%s] update %s lat: %s, lon: %s,", "Polygon(points) center = poly.centroid if not center: print \"no centroid for way %s\"", "0: continue nodes = nodes.split(',') points = [] for node_id in nodes: dbcurs.execute(\"SELECT", "points = [] for node_id in nodes: dbcurs.execute(\"SELECT * FROM nodes WHERE id=?\",", "COUNT(id) AS count FROM ways\") row = dbcurs.fetchone() count = row[0] offset =", "nodes.split(',') 
points = [] for node_id in nodes: dbcurs.execute(\"SELECT * FROM nodes WHERE", "lat = center.y lon = center.x woeid = 0 geo = rg.reverse_geocode(lat, lon)", "import urllib2 import json import time import reverse_geoplanet from shapely.geometry import Polygon from", "# continue if woeid > 0: continue nodes = nodes.split(',') points = []", "%s, woeid: %s\" % (offset, way_id, lat, lon, woeid) dbcurs.execute(\"UPDATE ways SET lat=?,", "sqlite3 import urllib2 import json import time import reverse_geoplanet from shapely.geometry import Polygon", "ways SET lat=?, lon=?, woeid=? WHERE id=?\", (lat, lon, woeid, way_id)) dbconn.commit() time.sleep(2)", "for row in dbcurs.fetchall(): way_id, lat, lon, woeid, nodes, tags = row if", "= [] for node_id in nodes: dbcurs.execute(\"SELECT * FROM nodes WHERE id=?\", (node_id,", "node[1])) center = None if len(points) == 2: line = LineString(points) center =", "< count : sql = \"SELECT * FROM ways LIMIT %s, %s\" %", "continue if woeid > 0: continue nodes = nodes.split(',') points = [] for", "+= limit return if __name__ == '__main__' : path = sys.argv[1] reversegeo =", "for node_id in nodes: dbcurs.execute(\"SELECT * FROM nodes WHERE id=?\", (node_id, )) node", "%s, %s\" % (offset, limit) print \"%s (%s)\" % (sql, count) dbcurs.execute(sql) for", "center = line.centroid else : points.append(points[0]) poly = Polygon(points) center = poly.centroid if", "lon, woeid, nodes, tags = row if lat and lon: pass # continue", "lat, lon, woeid, nodes, tags = row if lat and lon: pass #", "not center: print \"no centroid for way %s\" % way_id print poly continue", "nodes WHERE id=?\", (node_id, )) node = dbcurs.fetchone() points.append((node[2], node[1])) center = None", "WHERE id=?\", (lat, lon, woeid, way_id)) dbconn.commit() time.sleep(2) offset += limit return if", "woeid=? 
WHERE id=?\", (lat, lon, woeid, way_id)) dbconn.commit() time.sleep(2) offset += limit return", "center.y lon = center.x woeid = 0 geo = rg.reverse_geocode(lat, lon) if geo:", "row if lat and lon: pass # continue if woeid > 0: continue", "in dbcurs.fetchall(): way_id, lat, lon, woeid, nodes, tags = row if lat and", "if len(points) == 2: line = LineString(points) center = line.centroid else : points.append(points[0])", "%s\" % way_id print poly continue lat = center.y lon = center.x woeid", "row = dbcurs.fetchone() count = row[0] offset = 0 limit = 5000 while", "centroid for way %s\" % way_id print poly continue lat = center.y lon", "shapely import sqlite3 import urllib2 import json import time import reverse_geoplanet from shapely.geometry", "(offset, limit) print \"%s (%s)\" % (sql, count) dbcurs.execute(sql) for row in dbcurs.fetchall():", "print poly continue lat = center.y lon = center.x woeid = 0 geo", "node = dbcurs.fetchone() points.append((node[2], node[1])) center = None if len(points) == 2: line", "== 2: line = LineString(points) center = line.centroid else : points.append(points[0]) poly =", "time import reverse_geoplanet from shapely.geometry import Polygon from shapely.geometry import LineString def munge(path,", "% (offset, way_id, lat, lon, woeid) dbcurs.execute(\"UPDATE ways SET lat=?, lon=?, woeid=? 
WHERE", "center = None if len(points) == 2: line = LineString(points) center = line.centroid", "%s\" % (offset, limit) print \"%s (%s)\" % (sql, count) dbcurs.execute(sql) for row", "reverse_geoplanet.reverse_geoplanet(reversegeo_endpoint) dbconn = sqlite3.connect(path) dbcurs = dbconn.cursor() dbcurs.execute(\"SELECT COUNT(id) AS count FROM ways\")", "python import sys import shapely import sqlite3 import urllib2 import json import time", "count = row[0] offset = 0 limit = 5000 while offset < count", "tags = row if lat and lon: pass # continue if woeid >", "% way_id print poly continue lat = center.y lon = center.x woeid =", "= 0 geo = rg.reverse_geocode(lat, lon) if geo: woeid = geo['woeid'] print \"[%s]", "points.append((node[2], node[1])) center = None if len(points) == 2: line = LineString(points) center", "json import time import reverse_geoplanet from shapely.geometry import Polygon from shapely.geometry import LineString", "= reverse_geoplanet.reverse_geoplanet(reversegeo_endpoint) dbconn = sqlite3.connect(path) dbcurs = dbconn.cursor() dbcurs.execute(\"SELECT COUNT(id) AS count FROM", "lon: %s, woeid: %s\" % (offset, way_id, lat, lon, woeid) dbcurs.execute(\"UPDATE ways SET", "woeid = geo['woeid'] print \"[%s] update %s lat: %s, lon: %s, woeid: %s\"", "\"SELECT * FROM ways LIMIT %s, %s\" % (offset, limit) print \"%s (%s)\"", "0 geo = rg.reverse_geocode(lat, lon) if geo: woeid = geo['woeid'] print \"[%s] update", "def munge(path, reversegeo_endpoint) : # rg = reverse_geoplanet.reverse_geoplanet(reversegeo_endpoint) dbconn = sqlite3.connect(path) dbcurs =", "(lat, lon, woeid, way_id)) dbconn.commit() time.sleep(2) offset += limit return if __name__ ==", "= dbcurs.fetchone() count = row[0] offset = 0 limit = 5000 while offset", "0 limit = 5000 while offset < count : sql = \"SELECT *", "import sys import shapely import sqlite3 import urllib2 import json import time import", "dbcurs.fetchall(): way_id, lat, lon, woeid, nodes, tags = row if lat and lon:", "lat, 
lon, woeid) dbcurs.execute(\"UPDATE ways SET lat=?, lon=?, woeid=? WHERE id=?\", (lat, lon,", "return if __name__ == '__main__' : path = sys.argv[1] reversegeo = sys.argv[2] munge(path,", "reverse_geoplanet from shapely.geometry import Polygon from shapely.geometry import LineString def munge(path, reversegeo_endpoint) :", "rg = reverse_geoplanet.reverse_geoplanet(reversegeo_endpoint) dbconn = sqlite3.connect(path) dbcurs = dbconn.cursor() dbcurs.execute(\"SELECT COUNT(id) AS count", "geo: woeid = geo['woeid'] print \"[%s] update %s lat: %s, lon: %s, woeid:", "way_id)) dbconn.commit() time.sleep(2) offset += limit return if __name__ == '__main__' : path", "import shapely import sqlite3 import urllib2 import json import time import reverse_geoplanet from", "None if len(points) == 2: line = LineString(points) center = line.centroid else :", "import json import time import reverse_geoplanet from shapely.geometry import Polygon from shapely.geometry import", "2: line = LineString(points) center = line.centroid else : points.append(points[0]) poly = Polygon(points)", "= Polygon(points) center = poly.centroid if not center: print \"no centroid for way", "center = poly.centroid if not center: print \"no centroid for way %s\" %", "= row[0] offset = 0 limit = 5000 while offset < count :", "= nodes.split(',') points = [] for node_id in nodes: dbcurs.execute(\"SELECT * FROM nodes", "lon, woeid) dbcurs.execute(\"UPDATE ways SET lat=?, lon=?, woeid=? 
WHERE id=?\", (lat, lon, woeid,", "= center.y lon = center.x woeid = 0 geo = rg.reverse_geocode(lat, lon) if", "nodes: dbcurs.execute(\"SELECT * FROM nodes WHERE id=?\", (node_id, )) node = dbcurs.fetchone() points.append((node[2],", "LIMIT %s, %s\" % (offset, limit) print \"%s (%s)\" % (sql, count) dbcurs.execute(sql)", "#!/usr/bin/env python import sys import shapely import sqlite3 import urllib2 import json import", "= row if lat and lon: pass # continue if woeid > 0:", "count : sql = \"SELECT * FROM ways LIMIT %s, %s\" % (offset,", "%s lat: %s, lon: %s, woeid: %s\" % (offset, way_id, lat, lon, woeid)", "> 0: continue nodes = nodes.split(',') points = [] for node_id in nodes:", "id=?\", (lat, lon, woeid, way_id)) dbconn.commit() time.sleep(2) offset += limit return if __name__", "continue nodes = nodes.split(',') points = [] for node_id in nodes: dbcurs.execute(\"SELECT *", "dbcurs.execute(\"UPDATE ways SET lat=?, lon=?, woeid=? WHERE id=?\", (lat, lon, woeid, way_id)) dbconn.commit()", ": points.append(points[0]) poly = Polygon(points) center = poly.centroid if not center: print \"no", ": # rg = reverse_geoplanet.reverse_geoplanet(reversegeo_endpoint) dbconn = sqlite3.connect(path) dbcurs = dbconn.cursor() dbcurs.execute(\"SELECT COUNT(id)", "= line.centroid else : points.append(points[0]) poly = Polygon(points) center = poly.centroid if not", "= center.x woeid = 0 geo = rg.reverse_geocode(lat, lon) if geo: woeid =", "woeid) dbcurs.execute(\"UPDATE ways SET lat=?, lon=?, woeid=? 
WHERE id=?\", (lat, lon, woeid, way_id))", "nodes = nodes.split(',') points = [] for node_id in nodes: dbcurs.execute(\"SELECT * FROM", "line.centroid else : points.append(points[0]) poly = Polygon(points) center = poly.centroid if not center:", "row in dbcurs.fetchall(): way_id, lat, lon, woeid, nodes, tags = row if lat", "poly continue lat = center.y lon = center.x woeid = 0 geo =", "rg.reverse_geocode(lat, lon) if geo: woeid = geo['woeid'] print \"[%s] update %s lat: %s,", "FROM nodes WHERE id=?\", (node_id, )) node = dbcurs.fetchone() points.append((node[2], node[1])) center =", "woeid = 0 geo = rg.reverse_geocode(lat, lon) if geo: woeid = geo['woeid'] print", "import reverse_geoplanet from shapely.geometry import Polygon from shapely.geometry import LineString def munge(path, reversegeo_endpoint)", "ways\") row = dbcurs.fetchone() count = row[0] offset = 0 limit = 5000", "= 0 limit = 5000 while offset < count : sql = \"SELECT", "dbconn.cursor() dbcurs.execute(\"SELECT COUNT(id) AS count FROM ways\") row = dbcurs.fetchone() count = row[0]", "limit = 5000 while offset < count : sql = \"SELECT * FROM", "offset < count : sql = \"SELECT * FROM ways LIMIT %s, %s\"", "if not center: print \"no centroid for way %s\" % way_id print poly", "import LineString def munge(path, reversegeo_endpoint) : # rg = reverse_geoplanet.reverse_geoplanet(reversegeo_endpoint) dbconn = sqlite3.connect(path)", "AS count FROM ways\") row = dbcurs.fetchone() count = row[0] offset = 0", "sys import shapely import sqlite3 import urllib2 import json import time import reverse_geoplanet", "LineString(points) center = line.centroid else : points.append(points[0]) poly = Polygon(points) center = poly.centroid", "SET lat=?, lon=?, woeid=? 
WHERE id=?\", (lat, lon, woeid, way_id)) dbconn.commit() time.sleep(2) offset", "(sql, count) dbcurs.execute(sql) for row in dbcurs.fetchall(): way_id, lat, lon, woeid, nodes, tags", "woeid: %s\" % (offset, way_id, lat, lon, woeid) dbcurs.execute(\"UPDATE ways SET lat=?, lon=?,", "dbconn.commit() time.sleep(2) offset += limit return if __name__ == '__main__' : path =", "sqlite3.connect(path) dbcurs = dbconn.cursor() dbcurs.execute(\"SELECT COUNT(id) AS count FROM ways\") row = dbcurs.fetchone()", "limit) print \"%s (%s)\" % (sql, count) dbcurs.execute(sql) for row in dbcurs.fetchall(): way_id,", "lat: %s, lon: %s, woeid: %s\" % (offset, way_id, lat, lon, woeid) dbcurs.execute(\"UPDATE", "offset = 0 limit = 5000 while offset < count : sql =", "= \"SELECT * FROM ways LIMIT %s, %s\" % (offset, limit) print \"%s", "nodes, tags = row if lat and lon: pass # continue if woeid", "= dbcurs.fetchone() points.append((node[2], node[1])) center = None if len(points) == 2: line =", "woeid, nodes, tags = row if lat and lon: pass # continue if", "points.append(points[0]) poly = Polygon(points) center = poly.centroid if not center: print \"no centroid", "from shapely.geometry import Polygon from shapely.geometry import LineString def munge(path, reversegeo_endpoint) : #", "import time import reverse_geoplanet from shapely.geometry import Polygon from shapely.geometry import LineString def", "= sqlite3.connect(path) dbcurs = dbconn.cursor() dbcurs.execute(\"SELECT COUNT(id) AS count FROM ways\") row =", "print \"[%s] update %s lat: %s, lon: %s, woeid: %s\" % (offset, way_id,", "center.x woeid = 0 geo = rg.reverse_geocode(lat, lon) if geo: woeid = geo['woeid']", "* FROM ways LIMIT %s, %s\" % (offset, limit) print \"%s (%s)\" %", "lon) if geo: woeid = geo['woeid'] print \"[%s] update %s lat: %s, lon:", "update %s lat: %s, lon: %s, woeid: %s\" % (offset, way_id, lat, lon,", "for way %s\" % way_id print poly continue lat = center.y lon =", "print \"no centroid for way %s\" % way_id 
print poly continue lat =", ")) node = dbcurs.fetchone() points.append((node[2], node[1])) center = None if len(points) == 2:", "[] for node_id in nodes: dbcurs.execute(\"SELECT * FROM nodes WHERE id=?\", (node_id, ))", "poly = Polygon(points) center = poly.centroid if not center: print \"no centroid for", "lat and lon: pass # continue if woeid > 0: continue nodes =", "print \"%s (%s)\" % (sql, count) dbcurs.execute(sql) for row in dbcurs.fetchall(): way_id, lat,", "FROM ways LIMIT %s, %s\" % (offset, limit) print \"%s (%s)\" % (sql,", "from shapely.geometry import LineString def munge(path, reversegeo_endpoint) : # rg = reverse_geoplanet.reverse_geoplanet(reversegeo_endpoint) dbconn", "%s, lon: %s, woeid: %s\" % (offset, way_id, lat, lon, woeid) dbcurs.execute(\"UPDATE ways", "way_id print poly continue lat = center.y lon = center.x woeid = 0", "geo['woeid'] print \"[%s] update %s lat: %s, lon: %s, woeid: %s\" % (offset,", "Polygon from shapely.geometry import LineString def munge(path, reversegeo_endpoint) : # rg = reverse_geoplanet.reverse_geoplanet(reversegeo_endpoint)", ": sql = \"SELECT * FROM ways LIMIT %s, %s\" % (offset, limit)", "way_id, lat, lon, woeid) dbcurs.execute(\"UPDATE ways SET lat=?, lon=?, woeid=? 
WHERE id=?\", (lat,", "WHERE id=?\", (node_id, )) node = dbcurs.fetchone() points.append((node[2], node[1])) center = None if", "if woeid > 0: continue nodes = nodes.split(',') points = [] for node_id", "woeid, way_id)) dbconn.commit() time.sleep(2) offset += limit return if __name__ == '__main__' :", "dbcurs.execute(\"SELECT COUNT(id) AS count FROM ways\") row = dbcurs.fetchone() count = row[0] offset", "pass # continue if woeid > 0: continue nodes = nodes.split(',') points =", "and lon: pass # continue if woeid > 0: continue nodes = nodes.split(',')", "poly.centroid if not center: print \"no centroid for way %s\" % way_id print", "= geo['woeid'] print \"[%s] update %s lat: %s, lon: %s, woeid: %s\" %", "\"%s (%s)\" % (sql, count) dbcurs.execute(sql) for row in dbcurs.fetchall(): way_id, lat, lon,", "id=?\", (node_id, )) node = dbcurs.fetchone() points.append((node[2], node[1])) center = None if len(points)", "lon = center.x woeid = 0 geo = rg.reverse_geocode(lat, lon) if geo: woeid", "% (sql, count) dbcurs.execute(sql) for row in dbcurs.fetchall(): way_id, lat, lon, woeid, nodes,", "(offset, way_id, lat, lon, woeid) dbcurs.execute(\"UPDATE ways SET lat=?, lon=?, woeid=? WHERE id=?\",", "shapely.geometry import LineString def munge(path, reversegeo_endpoint) : # rg = reverse_geoplanet.reverse_geoplanet(reversegeo_endpoint) dbconn =", "munge(path, reversegeo_endpoint) : # rg = reverse_geoplanet.reverse_geoplanet(reversegeo_endpoint) dbconn = sqlite3.connect(path) dbcurs = dbconn.cursor()", "(node_id, )) node = dbcurs.fetchone() points.append((node[2], node[1])) center = None if len(points) ==", "* FROM nodes WHERE id=?\", (node_id, )) node = dbcurs.fetchone() points.append((node[2], node[1])) center" ]
[ "open(fileName,'r') a = json.loads(file.read()) file.close() return a # NGな単語かNGな語句が入っていたらTrue そうでないならFalse def is_ng_trend(trend): if", "sqlalchemy import and_ import random # /home/*/hama_dbとかが返ってくる #exec_path = os.path.abspath(os.path.dirname(__file__)).rsplit(\"/\",1)[0] exec_path = \".\"", "#l = tw.home_timeline(page = page_number, count=10) #Toyko座標ベタ打ち woeid = tw.trends_closest(35.652832, 139.839478)[0]['woeid'] trends_place =", "s['name'] if trend.startswith(\"#\"): trend = trend[1:] #print(trend) update_flag = check_text(trend, dbSession) if(not(update_flag)): continue", "continue if(random.randint(0,1)): text = \"な、なによ……! ニコだって\" + trend +\\ \"くらいできるんだから!!\" else: text =", "= json.loads(file.read()) file.close() return a # NGな単語かNGな語句が入っていたらTrue そうでないならFalse def is_ng_trend(trend): if trend in", "#exec_path = os.path.abspath(os.path.dirname(__file__)).rsplit(\"/\",1)[0] exec_path = \".\" conf_path = exec_path+\"/common/config.json\" ng_char_path = exec_path+\"/common/ng_char.json\" sys.path.insert(0,exec_path)", "= model.Trend() t.text = text #t.datetime = jTime dbSession.add(t) return True def main():", "import datetime from sqlalchemy import and_ import random # /home/*/hama_dbとかが返ってくる #exec_path = os.path.abspath(os.path.dirname(__file__)).rsplit(\"/\",1)[0]", ") if( query.count() > 0 ): return False #ここに品詞判定辺り入れる t = model.Trend() t.text", "\".\" conf_path = exec_path+\"/common/config.json\" ng_char_path = exec_path+\"/common/ng_char.json\" sys.path.insert(0,exec_path) from common import auth_api, model", "a # NGな単語かNGな語句が入っていたらTrue そうでないならFalse def is_ng_trend(trend): if trend in g_ngTrend: return True for", "for ng_char in g_ng_char: if ng_char in trend: return True return False \"\"\"", "dbSession) if(not(update_flag)): continue if(random.randint(0,1)): text = \"な、なによ……! 
ニコだって\" + trend +\\ \"くらいできるんだから!!\" else:", "model import tweepy # 格納しないテキストのリスト g_ngTrend = [ \"オフパコ\", \"フルチン\" ] # ファイルから読み出すので空に変更", "def main(): # twitterから発言を取ってきてDBに格納する userdata = read_json(conf_path) g_ng_char = read_json(ng_char_path) tw = auth_api.connect(userdata[\"consumer_token\"],", "+ datetime.timedelta(hours = 9) query = dbSession.query(model.Trend).filter( model.Trend.text == text ) if( query.count()", "= model.startSession(userdata) page_number = 0 update_flag = True while update_flag: update_flag = False", "exec_path+\"/common/config.json\" ng_char_path = exec_path+\"/common/ng_char.json\" sys.path.insert(0,exec_path) from common import auth_api, model import tweepy #", "if ng_char in trend: return True return False \"\"\" テキストが適合している = True 重複してたり、RTだったり", "= 9) query = dbSession.query(model.Trend).filter( model.Trend.text == text ) if( query.count() > 0", "model.startSession(userdata) page_number = 0 update_flag = True while update_flag: update_flag = False page_number", "#jTime = created_at + datetime.timedelta(hours = 9) query = dbSession.query(model.Trend).filter( model.Trend.text == text", "= text #t.datetime = jTime dbSession.add(t) return True def main(): # twitterから発言を取ってきてDBに格納する userdata", "dbSession.query(model.Trend).filter( model.Trend.text == text ) if( query.count() > 0 ): return False #ここに品詞判定辺り入れる", "> 1: break #l = tw.home_timeline(page = page_number, count=10) #Toyko座標ベタ打ち woeid = tw.trends_closest(35.652832,", "in g_ng_char: if ng_char in trend: return True return False \"\"\" テキストが適合している =", "datetime.timedelta(hours = 9) query = dbSession.query(model.Trend).filter( model.Trend.text == text ) if( query.count() >", "query = dbSession.query(model.Trend).filter( model.Trend.text == text ) if( query.count() > 0 ): return", "= s['name'] if trend.startswith(\"#\"): trend = trend[1:] #print(trend) update_flag = check_text(trend, dbSession) if(not(update_flag)):", "<filename>workflow/src/crawler.py #!/usr/bin/env python # -*- 
coding: utf-8 -*- import sys import os import", "dbSession): if( is_ng_trend(text) ): return False #jTime = created_at + datetime.timedelta(hours = 9)", "#print(trend) update_flag = check_text(trend, dbSession) if(not(update_flag)): continue if(random.randint(0,1)): text = \"な、なによ……! ニコだって\" +", "in g_ngTrend: return True for ng_char in g_ng_char: if ng_char in trend: return", "dbSession = None def read_json(fileName): file = open(fileName,'r') a = json.loads(file.read()) file.close() return", "False #ここに品詞判定辺り入れる t = model.Trend() t.text = text #t.datetime = jTime dbSession.add(t) return", "[ \"オフパコ\", \"フルチン\" ] # ファイルから読み出すので空に変更 g_ng_char = [] dbSession = None def", "import auth_api, model import tweepy # 格納しないテキストのリスト g_ngTrend = [ \"オフパコ\", \"フルチン\" ]", "True while update_flag: update_flag = False page_number += 1 if page_number > 1:", "= None def read_json(fileName): file = open(fileName,'r') a = json.loads(file.read()) file.close() return a", "if(not(update_flag)): continue if(random.randint(0,1)): text = \"な、なによ……! ニコだって\" + trend +\\ \"くらいできるんだから!!\" else: text", "update_flag = check_text(trend, dbSession) if(not(update_flag)): continue if(random.randint(0,1)): text = \"な、なによ……! 
ニコだって\" + trend", "json.loads(file.read()) file.close() return a # NGな単語かNGな語句が入っていたらTrue そうでないならFalse def is_ng_trend(trend): if trend in g_ngTrend:", "= trend + \"と言えば?\\nニコニー♪\\nかわいい\" +\\ trend +\"と言えば?\\nニコニー♪\" try: tw.update_status(text) print(\"trend \"+trend) except tweepy.TweepError:", "common import auth_api, model import tweepy # 格納しないテキストのリスト g_ngTrend = [ \"オフパコ\", \"フルチン\"", "ng_char in trend: return True return False \"\"\" テキストが適合している = True 重複してたり、RTだったり =", "text ) if( query.count() > 0 ): return False #ここに品詞判定辺り入れる t = model.Trend()", "page_number = 0 update_flag = True while update_flag: update_flag = False page_number +=", "tw.trends_closest(35.652832, 139.839478)[0]['woeid'] trends_place = tw.trends_place(woeid) l = trends_place[0]['trends'] for s in l: trend", "= [] dbSession = None def read_json(fileName): file = open(fileName,'r') a = json.loads(file.read())", "\"オフパコ\", \"フルチン\" ] # ファイルから読み出すので空に変更 g_ng_char = [] dbSession = None def read_json(fileName):", "-*- import sys import os import json import datetime from sqlalchemy import and_", "ファイルから読み出すので空に変更 g_ng_char = [] dbSession = None def read_json(fileName): file = open(fileName,'r') a", "= [ \"オフパコ\", \"フルチン\" ] # ファイルから読み出すので空に変更 g_ng_char = [] dbSession = None", "= jTime dbSession.add(t) return True def main(): # twitterから発言を取ってきてDBに格納する userdata = read_json(conf_path) g_ng_char", "utf-8 -*- import sys import os import json import datetime from sqlalchemy import", "from common import auth_api, model import tweepy # 格納しないテキストのリスト g_ngTrend = [ \"オフパコ\",", "= dbSession.query(model.Trend).filter( model.Trend.text == text ) if( query.count() > 0 ): return False", "#print tw.rate_limit_status() dbSession = model.startSession(userdata) page_number = 0 update_flag = True while update_flag:", "tw.rate_limit_status() dbSession = model.startSession(userdata) page_number = 0 update_flag = True while update_flag: update_flag", "os import json import datetime from sqlalchemy import and_ import 
random # /home/*/hama_dbとかが返ってくる", "print(\"trend \"+trend) except tweepy.TweepError: pass #print(\"flag: \", update_flag) if update_flag: break dbSession.commit() if", "woeid = tw.trends_closest(35.652832, 139.839478)[0]['woeid'] trends_place = tw.trends_place(woeid) l = trends_place[0]['trends'] for s in", "# twitterから発言を取ってきてDBに格納する userdata = read_json(conf_path) g_ng_char = read_json(ng_char_path) tw = auth_api.connect(userdata[\"consumer_token\"], userdata[\"consumer_secret\"], exec_path+\"/common/\")", "= exec_path+\"/common/ng_char.json\" sys.path.insert(0,exec_path) from common import auth_api, model import tweepy # 格納しないテキストのリスト g_ngTrend", "True for ng_char in g_ng_char: if ng_char in trend: return True return False", "False \"\"\" def check_text(text, dbSession): if( is_ng_trend(text) ): return False #jTime = created_at", "update_flag: update_flag = False page_number += 1 if page_number > 1: break #l", "if(random.randint(0,1)): text = \"な、なによ……! ニコだって\" + trend +\\ \"くらいできるんだから!!\" else: text = trend", "= \"な、なによ……! ニコだって\" + trend +\\ \"くらいできるんだから!!\" else: text = trend + \"と言えば?\\nニコニー♪\\nかわいい\"", "text = \"な、なによ……! 
ニコだって\" + trend +\\ \"くらいできるんだから!!\" else: text = trend +", "True return False \"\"\" テキストが適合している = True 重複してたり、RTだったり = False \"\"\" def check_text(text,", "return True return False \"\"\" テキストが適合している = True 重複してたり、RTだったり = False \"\"\" def", "coding: utf-8 -*- import sys import os import json import datetime from sqlalchemy", "auth_api, model import tweepy # 格納しないテキストのリスト g_ngTrend = [ \"オフパコ\", \"フルチン\" ] #", "ng_char in g_ng_char: if ng_char in trend: return True return False \"\"\" テキストが適合している", "check_text(text, dbSession): if( is_ng_trend(text) ): return False #jTime = created_at + datetime.timedelta(hours =", "+\"と言えば?\\nニコニー♪\" try: tw.update_status(text) print(\"trend \"+trend) except tweepy.TweepError: pass #print(\"flag: \", update_flag) if update_flag:", "None def read_json(fileName): file = open(fileName,'r') a = json.loads(file.read()) file.close() return a #", "\"と言えば?\\nニコニー♪\\nかわいい\" +\\ trend +\"と言えば?\\nニコニー♪\" try: tw.update_status(text) print(\"trend \"+trend) except tweepy.TweepError: pass #print(\"flag: \",", "#!/usr/bin/env python # -*- coding: utf-8 -*- import sys import os import json", "\"\"\" def check_text(text, dbSession): if( is_ng_trend(text) ): return False #jTime = created_at +", "tw.update_status(text) print(\"trend \"+trend) except tweepy.TweepError: pass #print(\"flag: \", update_flag) if update_flag: break dbSession.commit()", "read_json(conf_path) g_ng_char = read_json(ng_char_path) tw = auth_api.connect(userdata[\"consumer_token\"], userdata[\"consumer_secret\"], exec_path+\"/common/\") #print tw.rate_limit_status() dbSession =", "# -*- coding: utf-8 -*- import sys import os import json import datetime", "テキストが適合している = True 重複してたり、RTだったり = False \"\"\" def check_text(text, dbSession): if( is_ng_trend(text) ):", "= exec_path+\"/common/config.json\" ng_char_path = exec_path+\"/common/ng_char.json\" sys.path.insert(0,exec_path) from common import auth_api, model import tweepy", "read_json(fileName): file = open(fileName,'r') a = 
json.loads(file.read()) file.close() return a # NGな単語かNGな語句が入っていたらTrue そうでないならFalse", "[] dbSession = None def read_json(fileName): file = open(fileName,'r') a = json.loads(file.read()) file.close()", "= trends_place[0]['trends'] for s in l: trend = s['name'] if trend.startswith(\"#\"): trend =", "json import datetime from sqlalchemy import and_ import random # /home/*/hama_dbとかが返ってくる #exec_path =", "= trend[1:] #print(trend) update_flag = check_text(trend, dbSession) if(not(update_flag)): continue if(random.randint(0,1)): text = \"な、なによ……!", "if trend.startswith(\"#\"): trend = trend[1:] #print(trend) update_flag = check_text(trend, dbSession) if(not(update_flag)): continue if(random.randint(0,1)):", "trend = s['name'] if trend.startswith(\"#\"): trend = trend[1:] #print(trend) update_flag = check_text(trend, dbSession)", "重複してたり、RTだったり = False \"\"\" def check_text(text, dbSession): if( is_ng_trend(text) ): return False #jTime", "twitterから発言を取ってきてDBに格納する userdata = read_json(conf_path) g_ng_char = read_json(ng_char_path) tw = auth_api.connect(userdata[\"consumer_token\"], userdata[\"consumer_secret\"], exec_path+\"/common/\") #print", "in trend: return True return False \"\"\" テキストが適合している = True 重複してたり、RTだったり = False", "os.path.abspath(os.path.dirname(__file__)).rsplit(\"/\",1)[0] exec_path = \".\" conf_path = exec_path+\"/common/config.json\" ng_char_path = exec_path+\"/common/ng_char.json\" sys.path.insert(0,exec_path) from common", "# 格納しないテキストのリスト g_ngTrend = [ \"オフパコ\", \"フルチン\" ] # ファイルから読み出すので空に変更 g_ng_char = []", "if( is_ng_trend(text) ): return False #jTime = created_at + datetime.timedelta(hours = 9) query", "True def main(): # twitterから発言を取ってきてDBに格納する userdata = read_json(conf_path) g_ng_char = read_json(ng_char_path) tw =", "main(): # twitterから発言を取ってきてDBに格納する userdata = read_json(conf_path) g_ng_char = read_json(ng_char_path) tw = auth_api.connect(userdata[\"consumer_token\"], userdata[\"consumer_secret\"],", "a = json.loads(file.read()) file.close() 
return a # NGな単語かNGな語句が入っていたらTrue そうでないならFalse def is_ng_trend(trend): if trend", "= open(fileName,'r') a = json.loads(file.read()) file.close() return a # NGな単語かNGな語句が入っていたらTrue そうでないならFalse def is_ng_trend(trend):", "g_ngTrend = [ \"オフパコ\", \"フルチン\" ] # ファイルから読み出すので空に変更 g_ng_char = [] dbSession =", "True 重複してたり、RTだったり = False \"\"\" def check_text(text, dbSession): if( is_ng_trend(text) ): return False", "t = model.Trend() t.text = text #t.datetime = jTime dbSession.add(t) return True def", "= read_json(conf_path) g_ng_char = read_json(ng_char_path) tw = auth_api.connect(userdata[\"consumer_token\"], userdata[\"consumer_secret\"], exec_path+\"/common/\") #print tw.rate_limit_status() dbSession", "\"くらいできるんだから!!\" else: text = trend + \"と言えば?\\nニコニー♪\\nかわいい\" +\\ trend +\"と言えば?\\nニコニー♪\" try: tw.update_status(text) print(\"trend", "sys.path.insert(0,exec_path) from common import auth_api, model import tweepy # 格納しないテキストのリスト g_ngTrend = [", "): return False #ここに品詞判定辺り入れる t = model.Trend() t.text = text #t.datetime = jTime", "ng_char_path = exec_path+\"/common/ng_char.json\" sys.path.insert(0,exec_path) from common import auth_api, model import tweepy # 格納しないテキストのリスト", "/home/*/hama_dbとかが返ってくる #exec_path = os.path.abspath(os.path.dirname(__file__)).rsplit(\"/\",1)[0] exec_path = \".\" conf_path = exec_path+\"/common/config.json\" ng_char_path = exec_path+\"/common/ng_char.json\"", "= tw.home_timeline(page = page_number, count=10) #Toyko座標ベタ打ち woeid = tw.trends_closest(35.652832, 139.839478)[0]['woeid'] trends_place = tw.trends_place(woeid)", "else: text = trend + \"と言えば?\\nニコニー♪\\nかわいい\" +\\ trend +\"と言えば?\\nニコニー♪\" try: tw.update_status(text) print(\"trend \"+trend)", "file.close() return a # NGな単語かNGな語句が入っていたらTrue そうでないならFalse def is_ng_trend(trend): if trend in g_ngTrend: return", "import sys import os import json import datetime from sqlalchemy import and_ import", "trend + \"と言えば?\\nニコニー♪\\nかわいい\" +\\ trend +\"と言えば?\\nニコニー♪\" try: tw.update_status(text) 
print(\"trend \"+trend) except tweepy.TweepError: pass", "\"フルチン\" ] # ファイルから読み出すので空に変更 g_ng_char = [] dbSession = None def read_json(fileName): file", "= False \"\"\" def check_text(text, dbSession): if( is_ng_trend(text) ): return False #jTime =", "= auth_api.connect(userdata[\"consumer_token\"], userdata[\"consumer_secret\"], exec_path+\"/common/\") #print tw.rate_limit_status() dbSession = model.startSession(userdata) page_number = 0 update_flag", "= tw.trends_place(woeid) l = trends_place[0]['trends'] for s in l: trend = s['name'] if", "userdata = read_json(conf_path) g_ng_char = read_json(ng_char_path) tw = auth_api.connect(userdata[\"consumer_token\"], userdata[\"consumer_secret\"], exec_path+\"/common/\") #print tw.rate_limit_status()", "そうでないならFalse def is_ng_trend(trend): if trend in g_ngTrend: return True for ng_char in g_ng_char:", "= \".\" conf_path = exec_path+\"/common/config.json\" ng_char_path = exec_path+\"/common/ng_char.json\" sys.path.insert(0,exec_path) from common import auth_api,", "exec_path+\"/common/\") #print tw.rate_limit_status() dbSession = model.startSession(userdata) page_number = 0 update_flag = True while", "> 0 ): return False #ここに品詞判定辺り入れる t = model.Trend() t.text = text #t.datetime", "False #jTime = created_at + datetime.timedelta(hours = 9) query = dbSession.query(model.Trend).filter( model.Trend.text ==", "trends_place[0]['trends'] for s in l: trend = s['name'] if trend.startswith(\"#\"): trend = trend[1:]", "= False page_number += 1 if page_number > 1: break #l = tw.home_timeline(page", "import json import datetime from sqlalchemy import and_ import random # /home/*/hama_dbとかが返ってくる #exec_path", "page_number, count=10) #Toyko座標ベタ打ち woeid = tw.trends_closest(35.652832, 139.839478)[0]['woeid'] trends_place = tw.trends_place(woeid) l = trends_place[0]['trends']", "return True def main(): # twitterから発言を取ってきてDBに格納する userdata = read_json(conf_path) g_ng_char = read_json(ng_char_path) tw", "sys import os import json import datetime 
from sqlalchemy import and_ import random", "139.839478)[0]['woeid'] trends_place = tw.trends_place(woeid) l = trends_place[0]['trends'] for s in l: trend =", "return False #jTime = created_at + datetime.timedelta(hours = 9) query = dbSession.query(model.Trend).filter( model.Trend.text", "== text ) if( query.count() > 0 ): return False #ここに品詞判定辺り入れる t =", "1: break #l = tw.home_timeline(page = page_number, count=10) #Toyko座標ベタ打ち woeid = tw.trends_closest(35.652832, 139.839478)[0]['woeid']", "False page_number += 1 if page_number > 1: break #l = tw.home_timeline(page =", "jTime dbSession.add(t) return True def main(): # twitterから発言を取ってきてDBに格納する userdata = read_json(conf_path) g_ng_char =", "tw = auth_api.connect(userdata[\"consumer_token\"], userdata[\"consumer_secret\"], exec_path+\"/common/\") #print tw.rate_limit_status() dbSession = model.startSession(userdata) page_number = 0", "userdata[\"consumer_secret\"], exec_path+\"/common/\") #print tw.rate_limit_status() dbSession = model.startSession(userdata) page_number = 0 update_flag = True", "] # ファイルから読み出すので空に変更 g_ng_char = [] dbSession = None def read_json(fileName): file =", "= page_number, count=10) #Toyko座標ベタ打ち woeid = tw.trends_closest(35.652832, 139.839478)[0]['woeid'] trends_place = tw.trends_place(woeid) l =", "trend[1:] #print(trend) update_flag = check_text(trend, dbSession) if(not(update_flag)): continue if(random.randint(0,1)): text = \"な、なによ……! 
ニコだって\"", "update_flag = False page_number += 1 if page_number > 1: break #l =", "model.Trend.text == text ) if( query.count() > 0 ): return False #ここに品詞判定辺り入れる t", "#t.datetime = jTime dbSession.add(t) return True def main(): # twitterから発言を取ってきてDBに格納する userdata = read_json(conf_path)", "tweepy.TweepError: pass #print(\"flag: \", update_flag) if update_flag: break dbSession.commit() if __name__ == \"__main__\":", "= 0 update_flag = True while update_flag: update_flag = False page_number += 1", "s in l: trend = s['name'] if trend.startswith(\"#\"): trend = trend[1:] #print(trend) update_flag", "in l: trend = s['name'] if trend.startswith(\"#\"): trend = trend[1:] #print(trend) update_flag =", "g_ng_char: if ng_char in trend: return True return False \"\"\" テキストが適合している = True", "False \"\"\" テキストが適合している = True 重複してたり、RTだったり = False \"\"\" def check_text(text, dbSession): if(", "import and_ import random # /home/*/hama_dbとかが返ってくる #exec_path = os.path.abspath(os.path.dirname(__file__)).rsplit(\"/\",1)[0] exec_path = \".\" conf_path", "g_ng_char = [] dbSession = None def read_json(fileName): file = open(fileName,'r') a =", "1 if page_number > 1: break #l = tw.home_timeline(page = page_number, count=10) #Toyko座標ベタ打ち", "+= 1 if page_number > 1: break #l = tw.home_timeline(page = page_number, count=10)", "count=10) #Toyko座標ベタ打ち woeid = tw.trends_closest(35.652832, 139.839478)[0]['woeid'] trends_place = tw.trends_place(woeid) l = trends_place[0]['trends'] for", "trends_place = tw.trends_place(woeid) l = trends_place[0]['trends'] for s in l: trend = s['name']", "text = trend + \"と言えば?\\nニコニー♪\\nかわいい\" +\\ trend +\"と言えば?\\nニコニー♪\" try: tw.update_status(text) print(\"trend \"+trend) except", "0 ): return False #ここに品詞判定辺り入れる t = model.Trend() t.text = text #t.datetime =", "\"\"\" テキストが適合している = True 重複してたり、RTだったり = False \"\"\" def check_text(text, dbSession): if( is_ng_trend(text)", "if page_number > 1: break #l = tw.home_timeline(page = page_number, count=10) #Toyko座標ベタ打ち 
woeid", "trend.startswith(\"#\"): trend = trend[1:] #print(trend) update_flag = check_text(trend, dbSession) if(not(update_flag)): continue if(random.randint(0,1)): text", "): return False #jTime = created_at + datetime.timedelta(hours = 9) query = dbSession.query(model.Trend).filter(", "g_ng_char = read_json(ng_char_path) tw = auth_api.connect(userdata[\"consumer_token\"], userdata[\"consumer_secret\"], exec_path+\"/common/\") #print tw.rate_limit_status() dbSession = model.startSession(userdata)", "= read_json(ng_char_path) tw = auth_api.connect(userdata[\"consumer_token\"], userdata[\"consumer_secret\"], exec_path+\"/common/\") #print tw.rate_limit_status() dbSession = model.startSession(userdata) page_number", "trend: return True return False \"\"\" テキストが適合している = True 重複してたり、RTだったり = False \"\"\"", "+ trend +\\ \"くらいできるんだから!!\" else: text = trend + \"と言えば?\\nニコニー♪\\nかわいい\" +\\ trend +\"と言えば?\\nニコニー♪\"", "g_ngTrend: return True for ng_char in g_ng_char: if ng_char in trend: return True", "l = trends_place[0]['trends'] for s in l: trend = s['name'] if trend.startswith(\"#\"): trend", "tw.trends_place(woeid) l = trends_place[0]['trends'] for s in l: trend = s['name'] if trend.startswith(\"#\"):", "l: trend = s['name'] if trend.startswith(\"#\"): trend = trend[1:] #print(trend) update_flag = check_text(trend,", "return False \"\"\" テキストが適合している = True 重複してたり、RTだったり = False \"\"\" def check_text(text, dbSession):", "= True while update_flag: update_flag = False page_number += 1 if page_number >", "except tweepy.TweepError: pass #print(\"flag: \", update_flag) if update_flag: break dbSession.commit() if __name__ ==", "file = open(fileName,'r') a = json.loads(file.read()) file.close() return a # NGな単語かNGな語句が入っていたらTrue そうでないならFalse def", "格納しないテキストのリスト g_ngTrend = [ \"オフパコ\", \"フルチン\" ] # ファイルから読み出すので空に変更 g_ng_char = [] dbSession", "from sqlalchemy import and_ import random # /home/*/hama_dbとかが返ってくる #exec_path = 
os.path.abspath(os.path.dirname(__file__)).rsplit(\"/\",1)[0] exec_path =", "t.text = text #t.datetime = jTime dbSession.add(t) return True def main(): # twitterから発言を取ってきてDBに格納する", "dbSession = model.startSession(userdata) page_number = 0 update_flag = True while update_flag: update_flag =", "NGな単語かNGな語句が入っていたらTrue そうでないならFalse def is_ng_trend(trend): if trend in g_ngTrend: return True for ng_char in", "trend in g_ngTrend: return True for ng_char in g_ng_char: if ng_char in trend:", "9) query = dbSession.query(model.Trend).filter( model.Trend.text == text ) if( query.count() > 0 ):", "+\\ \"くらいできるんだから!!\" else: text = trend + \"と言えば?\\nニコニー♪\\nかわいい\" +\\ trend +\"と言えば?\\nニコニー♪\" try: tw.update_status(text)", "# ファイルから読み出すので空に変更 g_ng_char = [] dbSession = None def read_json(fileName): file = open(fileName,'r')", "return True for ng_char in g_ng_char: if ng_char in trend: return True return", "for s in l: trend = s['name'] if trend.startswith(\"#\"): trend = trend[1:] #print(trend)", "random # /home/*/hama_dbとかが返ってくる #exec_path = os.path.abspath(os.path.dirname(__file__)).rsplit(\"/\",1)[0] exec_path = \".\" conf_path = exec_path+\"/common/config.json\" ng_char_path", "+\\ trend +\"と言えば?\\nニコニー♪\" try: tw.update_status(text) print(\"trend \"+trend) except tweepy.TweepError: pass #print(\"flag: \", update_flag)", "trend +\"と言えば?\\nニコニー♪\" try: tw.update_status(text) print(\"trend \"+trend) except tweepy.TweepError: pass #print(\"flag: \", update_flag) if", "#ここに品詞判定辺り入れる t = model.Trend() t.text = text #t.datetime = jTime dbSession.add(t) return True", "#Toyko座標ベタ打ち woeid = tw.trends_closest(35.652832, 139.839478)[0]['woeid'] trends_place = tw.trends_place(woeid) l = trends_place[0]['trends'] for s", "# /home/*/hama_dbとかが返ってくる #exec_path = os.path.abspath(os.path.dirname(__file__)).rsplit(\"/\",1)[0] exec_path = \".\" conf_path = exec_path+\"/common/config.json\" ng_char_path =", "exec_path+\"/common/ng_char.json\" sys.path.insert(0,exec_path) from common import 
auth_api, model import tweepy # 格納しないテキストのリスト g_ngTrend =", "check_text(trend, dbSession) if(not(update_flag)): continue if(random.randint(0,1)): text = \"な、なによ……! ニコだって\" + trend +\\ \"くらいできるんだから!!\"", "\"+trend) except tweepy.TweepError: pass #print(\"flag: \", update_flag) if update_flag: break dbSession.commit() if __name__", "datetime from sqlalchemy import and_ import random # /home/*/hama_dbとかが返ってくる #exec_path = os.path.abspath(os.path.dirname(__file__)).rsplit(\"/\",1)[0] exec_path", "pass #print(\"flag: \", update_flag) if update_flag: break dbSession.commit() if __name__ == \"__main__\": main()", "= tw.trends_closest(35.652832, 139.839478)[0]['woeid'] trends_place = tw.trends_place(woeid) l = trends_place[0]['trends'] for s in l:", "trend = trend[1:] #print(trend) update_flag = check_text(trend, dbSession) if(not(update_flag)): continue if(random.randint(0,1)): text =", "auth_api.connect(userdata[\"consumer_token\"], userdata[\"consumer_secret\"], exec_path+\"/common/\") #print tw.rate_limit_status() dbSession = model.startSession(userdata) page_number = 0 update_flag =", "while update_flag: update_flag = False page_number += 1 if page_number > 1: break", "read_json(ng_char_path) tw = auth_api.connect(userdata[\"consumer_token\"], userdata[\"consumer_secret\"], exec_path+\"/common/\") #print tw.rate_limit_status() dbSession = model.startSession(userdata) page_number =", "import os import json import datetime from sqlalchemy import and_ import random #", "page_number += 1 if page_number > 1: break #l = tw.home_timeline(page = page_number,", "model.Trend() t.text = text #t.datetime = jTime dbSession.add(t) return True def main(): #", "break #l = tw.home_timeline(page = page_number, count=10) #Toyko座標ベタ打ち woeid = tw.trends_closest(35.652832, 139.839478)[0]['woeid'] trends_place", "dbSession.add(t) return True def main(): # twitterから発言を取ってきてDBに格納する userdata = read_json(conf_path) g_ng_char = read_json(ng_char_path)", "+ \"と言えば?\\nニコニー♪\\nかわいい\" +\\ trend 
+\"と言えば?\\nニコニー♪\" try: tw.update_status(text) print(\"trend \"+trend) except tweepy.TweepError: pass #print(\"flag:", "import tweepy # 格納しないテキストのリスト g_ngTrend = [ \"オフパコ\", \"フルチン\" ] # ファイルから読み出すので空に変更 g_ng_char", "return a # NGな単語かNGな語句が入っていたらTrue そうでないならFalse def is_ng_trend(trend): if trend in g_ngTrend: return True", "exec_path = \".\" conf_path = exec_path+\"/common/config.json\" ng_char_path = exec_path+\"/common/ng_char.json\" sys.path.insert(0,exec_path) from common import", "query.count() > 0 ): return False #ここに品詞判定辺り入れる t = model.Trend() t.text = text", "text #t.datetime = jTime dbSession.add(t) return True def main(): # twitterから発言を取ってきてDBに格納する userdata =", "ニコだって\" + trend +\\ \"くらいできるんだから!!\" else: text = trend + \"と言えば?\\nニコニー♪\\nかわいい\" +\\ trend", "def check_text(text, dbSession): if( is_ng_trend(text) ): return False #jTime = created_at + datetime.timedelta(hours", "= os.path.abspath(os.path.dirname(__file__)).rsplit(\"/\",1)[0] exec_path = \".\" conf_path = exec_path+\"/common/config.json\" ng_char_path = exec_path+\"/common/ng_char.json\" sys.path.insert(0,exec_path) from", "tw.home_timeline(page = page_number, count=10) #Toyko座標ベタ打ち woeid = tw.trends_closest(35.652832, 139.839478)[0]['woeid'] trends_place = tw.trends_place(woeid) l", "created_at + datetime.timedelta(hours = 9) query = dbSession.query(model.Trend).filter( model.Trend.text == text ) if(", "-*- coding: utf-8 -*- import sys import os import json import datetime from", "def read_json(fileName): file = open(fileName,'r') a = json.loads(file.read()) file.close() return a # NGな単語かNGな語句が入っていたらTrue", "trend +\\ \"くらいできるんだから!!\" else: text = trend + \"と言えば?\\nニコニー♪\\nかわいい\" +\\ trend +\"と言えば?\\nニコニー♪\" try:", "= True 重複してたり、RTだったり = False \"\"\" def check_text(text, dbSession): if( is_ng_trend(text) ): return", "python # -*- coding: utf-8 -*- import sys import os import json import", "tweepy # 格納しないテキストのリスト g_ngTrend = [ \"オフパコ\", \"フルチン\" ] # ファイルから読み出すので空に変更 g_ng_char =", "def 
is_ng_trend(trend): if trend in g_ngTrend: return True for ng_char in g_ng_char: if", "is_ng_trend(text) ): return False #jTime = created_at + datetime.timedelta(hours = 9) query =", "0 update_flag = True while update_flag: update_flag = False page_number += 1 if", "is_ng_trend(trend): if trend in g_ngTrend: return True for ng_char in g_ng_char: if ng_char", "and_ import random # /home/*/hama_dbとかが返ってくる #exec_path = os.path.abspath(os.path.dirname(__file__)).rsplit(\"/\",1)[0] exec_path = \".\" conf_path =", "# NGな単語かNGな語句が入っていたらTrue そうでないならFalse def is_ng_trend(trend): if trend in g_ngTrend: return True for ng_char", "update_flag = True while update_flag: update_flag = False page_number += 1 if page_number", "\"な、なによ……! ニコだって\" + trend +\\ \"くらいできるんだから!!\" else: text = trend + \"と言えば?\\nニコニー♪\\nかわいい\" +\\", "if( query.count() > 0 ): return False #ここに品詞判定辺り入れる t = model.Trend() t.text =", "try: tw.update_status(text) print(\"trend \"+trend) except tweepy.TweepError: pass #print(\"flag: \", update_flag) if update_flag: break", "import random # /home/*/hama_dbとかが返ってくる #exec_path = os.path.abspath(os.path.dirname(__file__)).rsplit(\"/\",1)[0] exec_path = \".\" conf_path = exec_path+\"/common/config.json\"", "= created_at + datetime.timedelta(hours = 9) query = dbSession.query(model.Trend).filter( model.Trend.text == text )", "return False #ここに品詞判定辺り入れる t = model.Trend() t.text = text #t.datetime = jTime dbSession.add(t)", "= check_text(trend, dbSession) if(not(update_flag)): continue if(random.randint(0,1)): text = \"な、なによ……! ニコだって\" + trend +\\", "page_number > 1: break #l = tw.home_timeline(page = page_number, count=10) #Toyko座標ベタ打ち woeid =", "conf_path = exec_path+\"/common/config.json\" ng_char_path = exec_path+\"/common/ng_char.json\" sys.path.insert(0,exec_path) from common import auth_api, model import", "if trend in g_ngTrend: return True for ng_char in g_ng_char: if ng_char in" ]
[ "if len(question[\"options\"]) > 10: raise ValueError(\"options array is greater than 10\") for option", "raise ValueError(\"options array is greater than 10\") for option in question[\"options\"]: if len(option)", "Basic logging setup \"\"\" logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) return", "than 200 chars\") if len(question[\"question\"]) > 255: raise ValueError(\"question value is greater than", "in question id: {question['question_id']}\") if len(question[\"explanation\"]) > 200: raise ValueError(\"explanation value is greater", "value is greater than 200 chars\") if len(question[\"question\"]) > 255: raise ValueError(\"question value", "logger.info(\"start processing questions\") for question in questions: logger.info(f\"check limitations for question id: {question['question_id']}", "{k: type_serializer.serialize(v) for k,v in question.items()} return question def upload_to_dynamo(client, question): raw_question =", "%(message)s', level=logging.INFO) return logging def load_questions(): with open(\"questions.json\", \"r\") as f: questions =", "\"\"\" Basic logging setup \"\"\" logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)", "return logging def load_questions(): with open(\"questions.json\", \"r\") as f: questions = json.loads(f.read()) return", "not found in question id: {question['question_id']}\") elif not \"options\" in question: raise KeyError(f\"options", "in question.items()} return question def upload_to_dynamo(client, question): raw_question = serialize(question) client.put_item( TableName=DYNAMODB_TABLE_NAME, Item=raw_question", "limitations for question id: {question['question_id']} \") check_limitations(question) logger.info(f\"Limitation check pass, start uploading to", "processing questions\") for question in questions: logger.info(f\"check limitations for question id: {question['question_id']} \")", "KeyError(f\"options key not 
found in question id: {question['question_id']}\") elif not \"correct_option\" in question:", "raise ValueError(f\"option: {option} is grater than 100 chars\") def serialize(question, type_serializer = TypeSerializer()):", "is greater than 200 chars\") if len(question[\"question\"]) > 255: raise ValueError(\"question value is", "if len(question[\"question\"]) > 255: raise ValueError(\"question value is greater than 255 chars\") if", "is grater than 100 chars\") def serialize(question, type_serializer = TypeSerializer()): question = {k:", "question id: {question['question_id']} \") check_limitations(question) logger.info(f\"Limitation check pass, start uploading to dynamodb\") upload_to_dynamo(client_dynamo,", "key not found in question id: {question['question_id']}\") elif not \"question\" in question: raise", "is greater than 10\") for option in question[\"options\"]: if len(option) > 100: raise", "chars\") def serialize(question, type_serializer = TypeSerializer()): question = {k: type_serializer.serialize(v) for k,v in", "logger.info(f\"check limitations for question id: {question['question_id']} \") check_limitations(question) logger.info(f\"Limitation check pass, start uploading", "f: questions = json.loads(f.read()) return questions def check_limitations(question): if not \"explanation\" in question:", "%(name)s - %(levelname)s - %(message)s', level=logging.INFO) return logging def load_questions(): with open(\"questions.json\", \"r\")", "{question['question_id']}\") elif not \"correct_option\" in question: raise KeyError(f\"correct_option key not found in question", "array is greater than 10\") for option in question[\"options\"]: if len(option) > 100:", "id: {question['question_id']}\") if len(question[\"explanation\"]) > 200: raise ValueError(\"explanation value is greater than 200", "in questions: logger.info(f\"check limitations for question id: {question['question_id']} \") check_limitations(question) logger.info(f\"Limitation check pass,", "in question 
id: {question['question_id']}\") elif not \"question\" in question: raise KeyError(f\"question key not", "10: raise ValueError(\"options array is greater than 10\") for option in question[\"options\"]: if", "check_limitations(question) logger.info(f\"Limitation check pass, start uploading to dynamodb\") upload_to_dynamo(client_dynamo, question) if __name__ ==", "id: {question['question_id']} \") check_limitations(question) logger.info(f\"Limitation check pass, start uploading to dynamodb\") upload_to_dynamo(client_dynamo, question)", "from boto3.dynamodb.types import TypeDeserializer, TypeSerializer import json import logging DYNAMODB_TABLE_NAME = \"quizzes_questions\" def", "logging DYNAMODB_TABLE_NAME = \"quizzes_questions\" def setup_logging(): \"\"\" Basic logging setup \"\"\" logging.basicConfig(format='%(asctime)s -", "main(): client_dynamo = boto3.client('dynamodb') logger = setup_logging() logger.info(\"loadding questions from questions.json\") questions =", "return question def upload_to_dynamo(client, question): raw_question = serialize(question) client.put_item( TableName=DYNAMODB_TABLE_NAME, Item=raw_question ) def", "if len(question[\"explanation\"]) > 200: raise ValueError(\"explanation value is greater than 200 chars\") if", "in question[\"options\"]: if len(option) > 100: raise ValueError(f\"option: {option} is grater than 100", "question: raise KeyError(f\"explanation key not found in question id: {question['question_id']}\") elif not \"question\"", "json.loads(f.read()) return questions def check_limitations(question): if not \"explanation\" in question: raise KeyError(f\"explanation key", "def upload_to_dynamo(client, question): raw_question = serialize(question) client.put_item( TableName=DYNAMODB_TABLE_NAME, Item=raw_question ) def main(): client_dynamo", "question): raw_question = serialize(question) client.put_item( TableName=DYNAMODB_TABLE_NAME, Item=raw_question ) def main(): client_dynamo = boto3.client('dynamodb')", "questions = 
json.loads(f.read()) return questions def check_limitations(question): if not \"explanation\" in question: raise", "len(option) > 100: raise ValueError(f\"option: {option} is grater than 100 chars\") def serialize(question,", "\"\"\" logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) return logging def load_questions():", "raise KeyError(f\"options key not found in question id: {question['question_id']}\") elif not \"correct_option\" in", "grater than 100 chars\") def serialize(question, type_serializer = TypeSerializer()): question = {k: type_serializer.serialize(v)", "logger = setup_logging() logger.info(\"loadding questions from questions.json\") questions = load_questions() logger.info(\"start processing questions\")", "from questions.json\") questions = load_questions() logger.info(\"start processing questions\") for question in questions: logger.info(f\"check", "> 100: raise ValueError(f\"option: {option} is grater than 100 chars\") def serialize(question, type_serializer", "len(question[\"options\"]) > 10: raise ValueError(\"options array is greater than 10\") for option in", "serialize(question) client.put_item( TableName=DYNAMODB_TABLE_NAME, Item=raw_question ) def main(): client_dynamo = boto3.client('dynamodb') logger = setup_logging()", "DYNAMODB_TABLE_NAME = \"quizzes_questions\" def setup_logging(): \"\"\" Basic logging setup \"\"\" logging.basicConfig(format='%(asctime)s - %(name)s", "setup_logging(): \"\"\" Basic logging setup \"\"\" logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',", "raise KeyError(f\"explanation key not found in question id: {question['question_id']}\") elif not \"question\" in", "upload_to_dynamo(client, question): raw_question = serialize(question) client.put_item( TableName=DYNAMODB_TABLE_NAME, Item=raw_question ) def main(): client_dynamo =", "Item=raw_question ) def main(): client_dynamo = boto3.client('dynamodb') logger = setup_logging() 
logger.info(\"loadding questions from", "check_limitations(question): if not \"explanation\" in question: raise KeyError(f\"explanation key not found in question", "import boto3 from boto3.dynamodb.types import TypeDeserializer, TypeSerializer import json import logging DYNAMODB_TABLE_NAME =", "def check_limitations(question): if not \"explanation\" in question: raise KeyError(f\"explanation key not found in", "def serialize(question, type_serializer = TypeSerializer()): question = {k: type_serializer.serialize(v) for k,v in question.items()}", "import json import logging DYNAMODB_TABLE_NAME = \"quizzes_questions\" def setup_logging(): \"\"\" Basic logging setup", "elif not \"correct_option\" in question: raise KeyError(f\"correct_option key not found in question id:", "raise ValueError(\"question value is greater than 255 chars\") if len(question[\"options\"]) > 10: raise", "question: raise KeyError(f\"question key not found in question id: {question['question_id']}\") elif not \"options\"", "if not \"explanation\" in question: raise KeyError(f\"explanation key not found in question id:", "found in question id: {question['question_id']}\") if len(question[\"explanation\"]) > 200: raise ValueError(\"explanation value is", "ValueError(f\"option: {option} is grater than 100 chars\") def serialize(question, type_serializer = TypeSerializer()): question", "TableName=DYNAMODB_TABLE_NAME, Item=raw_question ) def main(): client_dynamo = boto3.client('dynamodb') logger = setup_logging() logger.info(\"loadding questions", "> 200: raise ValueError(\"explanation value is greater than 200 chars\") if len(question[\"question\"]) >", "found in question id: {question['question_id']}\") elif not \"correct_option\" in question: raise KeyError(f\"correct_option key", "if len(option) > 100: raise ValueError(f\"option: {option} is grater than 100 chars\") def", "with open(\"questions.json\", \"r\") as f: questions = json.loads(f.read()) return questions def 
check_limitations(question): if", "10\") for option in question[\"options\"]: if len(option) > 100: raise ValueError(f\"option: {option} is", "question: raise KeyError(f\"options key not found in question id: {question['question_id']}\") elif not \"correct_option\"", "questions from questions.json\") questions = load_questions() logger.info(\"start processing questions\") for question in questions:", "elif not \"question\" in question: raise KeyError(f\"question key not found in question id:", "- %(levelname)s - %(message)s', level=logging.INFO) return logging def load_questions(): with open(\"questions.json\", \"r\") as", "question.items()} return question def upload_to_dynamo(client, question): raw_question = serialize(question) client.put_item( TableName=DYNAMODB_TABLE_NAME, Item=raw_question )", "def setup_logging(): \"\"\" Basic logging setup \"\"\" logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s -", "k,v in question.items()} return question def upload_to_dynamo(client, question): raw_question = serialize(question) client.put_item( TableName=DYNAMODB_TABLE_NAME,", "in question: raise KeyError(f\"question key not found in question id: {question['question_id']}\") elif not", "%(levelname)s - %(message)s', level=logging.INFO) return logging def load_questions(): with open(\"questions.json\", \"r\") as f:", "KeyError(f\"correct_option key not found in question id: {question['question_id']}\") if len(question[\"explanation\"]) > 200: raise", "key not found in question id: {question['question_id']}\") if len(question[\"explanation\"]) > 200: raise ValueError(\"explanation", "greater than 200 chars\") if len(question[\"question\"]) > 255: raise ValueError(\"question value is greater", "in question: raise KeyError(f\"explanation key not found in question id: {question['question_id']}\") elif not", "questions.json\") questions = load_questions() logger.info(\"start processing questions\") for question in questions: logger.info(f\"check limitations", 
"\"r\") as f: questions = json.loads(f.read()) return questions def check_limitations(question): if not \"explanation\"", "value is greater than 255 chars\") if len(question[\"options\"]) > 10: raise ValueError(\"options array", "logging def load_questions(): with open(\"questions.json\", \"r\") as f: questions = json.loads(f.read()) return questions", "check pass, start uploading to dynamodb\") upload_to_dynamo(client_dynamo, question) if __name__ == \"__main__\": main()", "than 255 chars\") if len(question[\"options\"]) > 10: raise ValueError(\"options array is greater than", "not \"explanation\" in question: raise KeyError(f\"explanation key not found in question id: {question['question_id']}\")", "not found in question id: {question['question_id']}\") elif not \"question\" in question: raise KeyError(f\"question", "not found in question id: {question['question_id']}\") elif not \"correct_option\" in question: raise KeyError(f\"correct_option", "chars\") if len(question[\"question\"]) > 255: raise ValueError(\"question value is greater than 255 chars\")", "{question['question_id']} \") check_limitations(question) logger.info(f\"Limitation check pass, start uploading to dynamodb\") upload_to_dynamo(client_dynamo, question) if", "not \"correct_option\" in question: raise KeyError(f\"correct_option key not found in question id: {question['question_id']}\")", "TypeSerializer import json import logging DYNAMODB_TABLE_NAME = \"quizzes_questions\" def setup_logging(): \"\"\" Basic logging", "= \"quizzes_questions\" def setup_logging(): \"\"\" Basic logging setup \"\"\" logging.basicConfig(format='%(asctime)s - %(name)s -", "= json.loads(f.read()) return questions def check_limitations(question): if not \"explanation\" in question: raise KeyError(f\"explanation", "question[\"options\"]: if len(option) > 100: raise ValueError(f\"option: {option} is grater than 100 chars\")", "\"correct_option\" in question: raise KeyError(f\"correct_option key not found in question id: 
{question['question_id']}\") if", "logger.info(f\"Limitation check pass, start uploading to dynamodb\") upload_to_dynamo(client_dynamo, question) if __name__ == \"__main__\":", "client.put_item( TableName=DYNAMODB_TABLE_NAME, Item=raw_question ) def main(): client_dynamo = boto3.client('dynamodb') logger = setup_logging() logger.info(\"loadding", "- %(message)s', level=logging.INFO) return logging def load_questions(): with open(\"questions.json\", \"r\") as f: questions", "boto3.dynamodb.types import TypeDeserializer, TypeSerializer import json import logging DYNAMODB_TABLE_NAME = \"quizzes_questions\" def setup_logging():", "= serialize(question) client.put_item( TableName=DYNAMODB_TABLE_NAME, Item=raw_question ) def main(): client_dynamo = boto3.client('dynamodb') logger =", "{option} is grater than 100 chars\") def serialize(question, type_serializer = TypeSerializer()): question =", "question id: {question['question_id']}\") elif not \"correct_option\" in question: raise KeyError(f\"correct_option key not found", "id: {question['question_id']}\") elif not \"correct_option\" in question: raise KeyError(f\"correct_option key not found in", "option in question[\"options\"]: if len(option) > 100: raise ValueError(f\"option: {option} is grater than", "100: raise ValueError(f\"option: {option} is grater than 100 chars\") def serialize(question, type_serializer =", "questions\") for question in questions: logger.info(f\"check limitations for question id: {question['question_id']} \") check_limitations(question)", "question id: {question['question_id']}\") elif not \"options\" in question: raise KeyError(f\"options key not found", "in question: raise KeyError(f\"options key not found in question id: {question['question_id']}\") elif not", "setup_logging() logger.info(\"loadding questions from questions.json\") questions = load_questions() logger.info(\"start processing questions\") for question", "question in questions: logger.info(f\"check limitations for question 
id: {question['question_id']} \") check_limitations(question) logger.info(f\"Limitation check", "> 10: raise ValueError(\"options array is greater than 10\") for option in question[\"options\"]:", "ValueError(\"options array is greater than 10\") for option in question[\"options\"]: if len(option) >", "TypeSerializer()): question = {k: type_serializer.serialize(v) for k,v in question.items()} return question def upload_to_dynamo(client,", "import logging DYNAMODB_TABLE_NAME = \"quizzes_questions\" def setup_logging(): \"\"\" Basic logging setup \"\"\" logging.basicConfig(format='%(asctime)s", "questions: logger.info(f\"check limitations for question id: {question['question_id']} \") check_limitations(question) logger.info(f\"Limitation check pass, start", "in question: raise KeyError(f\"correct_option key not found in question id: {question['question_id']}\") if len(question[\"explanation\"])", "question def upload_to_dynamo(client, question): raw_question = serialize(question) client.put_item( TableName=DYNAMODB_TABLE_NAME, Item=raw_question ) def main():", "than 10\") for option in question[\"options\"]: if len(option) > 100: raise ValueError(f\"option: {option}", "level=logging.INFO) return logging def load_questions(): with open(\"questions.json\", \"r\") as f: questions = json.loads(f.read())", "TypeDeserializer, TypeSerializer import json import logging DYNAMODB_TABLE_NAME = \"quizzes_questions\" def setup_logging(): \"\"\" Basic", "\"quizzes_questions\" def setup_logging(): \"\"\" Basic logging setup \"\"\" logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s", ") def main(): client_dynamo = boto3.client('dynamodb') logger = setup_logging() logger.info(\"loadding questions from questions.json\")", "key not found in question id: {question['question_id']}\") elif not \"correct_option\" in question: raise", "logger.info(\"loadding questions from questions.json\") questions = load_questions() logger.info(\"start processing questions\") for question 
in", "key not found in question id: {question['question_id']}\") elif not \"options\" in question: raise", "logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) return logging def load_questions(): with", "> 255: raise ValueError(\"question value is greater than 255 chars\") if len(question[\"options\"]) >", "in question id: {question['question_id']}\") elif not \"correct_option\" in question: raise KeyError(f\"correct_option key not", "200 chars\") if len(question[\"question\"]) > 255: raise ValueError(\"question value is greater than 255", "elif not \"options\" in question: raise KeyError(f\"options key not found in question id:", "255 chars\") if len(question[\"options\"]) > 10: raise ValueError(\"options array is greater than 10\")", "client_dynamo = boto3.client('dynamodb') logger = setup_logging() logger.info(\"loadding questions from questions.json\") questions = load_questions()", "as f: questions = json.loads(f.read()) return questions def check_limitations(question): if not \"explanation\" in", "return questions def check_limitations(question): if not \"explanation\" in question: raise KeyError(f\"explanation key not", "= TypeSerializer()): question = {k: type_serializer.serialize(v) for k,v in question.items()} return question def", "255: raise ValueError(\"question value is greater than 255 chars\") if len(question[\"options\"]) > 10:", "= {k: type_serializer.serialize(v) for k,v in question.items()} return question def upload_to_dynamo(client, question): raw_question", "question = {k: type_serializer.serialize(v) for k,v in question.items()} return question def upload_to_dynamo(client, question):", "= load_questions() logger.info(\"start processing questions\") for question in questions: logger.info(f\"check limitations for question", "questions def check_limitations(question): if not \"explanation\" in question: raise KeyError(f\"explanation key not found", "id: {question['question_id']}\") elif not 
\"options\" in question: raise KeyError(f\"options key not found in", "KeyError(f\"explanation key not found in question id: {question['question_id']}\") elif not \"question\" in question:", "than 100 chars\") def serialize(question, type_serializer = TypeSerializer()): question = {k: type_serializer.serialize(v) for", "raw_question = serialize(question) client.put_item( TableName=DYNAMODB_TABLE_NAME, Item=raw_question ) def main(): client_dynamo = boto3.client('dynamodb') logger", "is greater than 255 chars\") if len(question[\"options\"]) > 10: raise ValueError(\"options array is", "in question id: {question['question_id']}\") elif not \"options\" in question: raise KeyError(f\"options key not", "\"options\" in question: raise KeyError(f\"options key not found in question id: {question['question_id']}\") elif", "boto3.client('dynamodb') logger = setup_logging() logger.info(\"loadding questions from questions.json\") questions = load_questions() logger.info(\"start processing", "question: raise KeyError(f\"correct_option key not found in question id: {question['question_id']}\") if len(question[\"explanation\"]) >", "ValueError(\"explanation value is greater than 200 chars\") if len(question[\"question\"]) > 255: raise ValueError(\"question", "type_serializer.serialize(v) for k,v in question.items()} return question def upload_to_dynamo(client, question): raw_question = serialize(question)", "raise ValueError(\"explanation value is greater than 200 chars\") if len(question[\"question\"]) > 255: raise", "id: {question['question_id']}\") elif not \"question\" in question: raise KeyError(f\"question key not found in", "questions = load_questions() logger.info(\"start processing questions\") for question in questions: logger.info(f\"check limitations for", "for k,v in question.items()} return question def upload_to_dynamo(client, question): raw_question = serialize(question) client.put_item(", "serialize(question, type_serializer = TypeSerializer()): question = {k: 
type_serializer.serialize(v) for k,v in question.items()} return", "setup \"\"\" logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) return logging def", "ValueError(\"question value is greater than 255 chars\") if len(question[\"options\"]) > 10: raise ValueError(\"options", "json import logging DYNAMODB_TABLE_NAME = \"quizzes_questions\" def setup_logging(): \"\"\" Basic logging setup \"\"\"", "for question in questions: logger.info(f\"check limitations for question id: {question['question_id']} \") check_limitations(question) logger.info(f\"Limitation", "not \"options\" in question: raise KeyError(f\"options key not found in question id: {question['question_id']}\")", "for option in question[\"options\"]: if len(option) > 100: raise ValueError(f\"option: {option} is grater", "for question id: {question['question_id']} \") check_limitations(question) logger.info(f\"Limitation check pass, start uploading to dynamodb\")", "load_questions(): with open(\"questions.json\", \"r\") as f: questions = json.loads(f.read()) return questions def check_limitations(question):", "def load_questions(): with open(\"questions.json\", \"r\") as f: questions = json.loads(f.read()) return questions def", "open(\"questions.json\", \"r\") as f: questions = json.loads(f.read()) return questions def check_limitations(question): if not", "def main(): client_dynamo = boto3.client('dynamodb') logger = setup_logging() logger.info(\"loadding questions from questions.json\") questions", "type_serializer = TypeSerializer()): question = {k: type_serializer.serialize(v) for k,v in question.items()} return question", "import TypeDeserializer, TypeSerializer import json import logging DYNAMODB_TABLE_NAME = \"quizzes_questions\" def setup_logging(): \"\"\"", "chars\") if len(question[\"options\"]) > 10: raise ValueError(\"options array is greater than 10\") for", "{question['question_id']}\") elif not \"question\" in question: raise 
KeyError(f\"question key not found in question", "KeyError(f\"question key not found in question id: {question['question_id']}\") elif not \"options\" in question:", "\") check_limitations(question) logger.info(f\"Limitation check pass, start uploading to dynamodb\") upload_to_dynamo(client_dynamo, question) if __name__", "raise KeyError(f\"correct_option key not found in question id: {question['question_id']}\") if len(question[\"explanation\"]) > 200:", "{question['question_id']}\") if len(question[\"explanation\"]) > 200: raise ValueError(\"explanation value is greater than 200 chars\")", "question id: {question['question_id']}\") elif not \"question\" in question: raise KeyError(f\"question key not found", "= setup_logging() logger.info(\"loadding questions from questions.json\") questions = load_questions() logger.info(\"start processing questions\") for", "- %(name)s - %(levelname)s - %(message)s', level=logging.INFO) return logging def load_questions(): with open(\"questions.json\",", "greater than 10\") for option in question[\"options\"]: if len(option) > 100: raise ValueError(f\"option:", "200: raise ValueError(\"explanation value is greater than 200 chars\") if len(question[\"question\"]) > 255:", "raise KeyError(f\"question key not found in question id: {question['question_id']}\") elif not \"options\" in", "load_questions() logger.info(\"start processing questions\") for question in questions: logger.info(f\"check limitations for question id:", "boto3 from boto3.dynamodb.types import TypeDeserializer, TypeSerializer import json import logging DYNAMODB_TABLE_NAME = \"quizzes_questions\"", "\"question\" in question: raise KeyError(f\"question key not found in question id: {question['question_id']}\") elif", "not found in question id: {question['question_id']}\") if len(question[\"explanation\"]) > 200: raise ValueError(\"explanation value", "greater than 255 chars\") if len(question[\"options\"]) > 10: raise ValueError(\"options array is greater", 
"{question['question_id']}\") elif not \"options\" in question: raise KeyError(f\"options key not found in question", "not \"question\" in question: raise KeyError(f\"question key not found in question id: {question['question_id']}\")", "\"explanation\" in question: raise KeyError(f\"explanation key not found in question id: {question['question_id']}\") elif", "found in question id: {question['question_id']}\") elif not \"question\" in question: raise KeyError(f\"question key", "logging setup \"\"\" logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) return logging", "found in question id: {question['question_id']}\") elif not \"options\" in question: raise KeyError(f\"options key", "len(question[\"explanation\"]) > 200: raise ValueError(\"explanation value is greater than 200 chars\") if len(question[\"question\"])", "= boto3.client('dynamodb') logger = setup_logging() logger.info(\"loadding questions from questions.json\") questions = load_questions() logger.info(\"start", "len(question[\"question\"]) > 255: raise ValueError(\"question value is greater than 255 chars\") if len(question[\"options\"])", "question id: {question['question_id']}\") if len(question[\"explanation\"]) > 200: raise ValueError(\"explanation value is greater than", "100 chars\") def serialize(question, type_serializer = TypeSerializer()): question = {k: type_serializer.serialize(v) for k,v" ]
[ "OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** ############################################################################# # TODO: # #", "dW[:,y[i]] += -1*X[i] # Right now the loss is a sum over all", "correct_scores = correct_scores_vec[:,np.newaxis] # broadcasting-ready vec # compute margins M = S -", "loss is a sum over all training examples, but we want it #", "correct classes (one-hot encodig of y_i) mask = np.eye(W.shape[1], dtype=bool)[y] # correct scores", "(DO NOT DELETE/MODIFY THIS LINE)***** # We'll use dpM to store two contributions", "a minibatch of data. - y: A numpy array of shape (N,) containing", "# second contributoin subtract fro self self sum of others active dpM[mask] =", "of data. - y: A numpy array of shape (N,) containing training labels;", "a result you may need to modify some of the # # code", "c means that X[i] has label c, where 0 <= c < C.", "OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** S = X.dot(W) # scores", "= X.T.dot(dpM) dpM = np.zeros((X.shape[0], W.shape[1])) # N x C zeros # first", "dpM[mask] = -1*np.sum(pMactive, axis=1) # gadiaent dW = 1.0/num_train * X.T.dot(dpM) + 2*W", "X.T.dot(dpM) + 2*W # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****", "of same shape as W \"\"\" dW = np.zeros(W.shape) # 3073 x 10", "sum of others active dpM[mask] = -1*np.sum(pMactive, axis=1) # gadiaent dW = 1.0/num_train", "x C zeros # first contributoin (all active margins for others) pMactive =", "from scratch, it may be easier # # to reuse some of the", "the gradient num_classes = W.shape[1] num_train = X.shape[0] loss = 0.0 for i", "correct_scores + 1 # margins (N,C) M[mask] = 0 pM = np.where(M>0, M,", "D) containing a minibatch of data. 
- y: A numpy array of shape", "(N,) containing training labels; y[i] = c means that X[i] has label c,", "be easier # # to reuse some of the intermediate values that you", "in range(num_classes): if j == y[i]: continue margin = scores[j] - correct_class_score +", "float - gradient with respect to weights W; an array of same shape", "import shuffle from past.builtins import xrange def svm_loss_naive(W, X, y, reg): \"\"\" Structured", "the structured SVM # # loss, storing the result in dW. # #", "compute the # # loss. # ############################################################################# # *****START OF YOUR CODE (DO", "TODO: # # Implement a vectorized version of the structured SVM loss, storing", "np.eye(W.shape[1], dtype=bool)[y] # correct scores which we'll be subtracting from all other correct_scores_vec", "it # to be an average instead so we divide by num_train. loss", "loss loss = 1.0/num_train * np.sum(pM) + reg * np.sum(W * W) #", "builtins import range import numpy as np from random import shuffle from past.builtins", "that tells us which rows of X we # should to include in", "reuse some of the intermediate values that you used to compute the #", "xrange def svm_loss_naive(W, X, y, reg): \"\"\" Structured SVM loss function, naive implementation", "instead so we divide by num_train. loss /= num_train # 1/N factor in", "############################################################################# # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** # We'll", "as svm_loss_naive. \"\"\" num_train = X.shape[0] ############################################################################# # TODO: # # Implement a", "YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** dW += 2*W # *****END OF", "zeros # compute the loss and the gradient num_classes = W.shape[1] num_train =", "in front dW /= num_train # Add regularization to the loss. loss +=", "# # code above to compute the gradient. 
# ############################################################################# # *****START OF", "maring conributions + regularization # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS", "y_i) mask = np.eye(W.shape[1], dtype=bool)[y] # correct scores which we'll be subtracting from", "N x C zeros # first contributoin (all active margins for others) pMactive", "training examples, but we want it # to be an average instead so", "(DO NOT DELETE/MODIFY THIS LINE)***** ############################################################################# # TODO: # # Implement a vectorized", "YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** # We'll use dpM to store", "function and store it dW. # # Rather than first computing the loss", "dW. # # Rather than first computing the loss and then computing the", "# Hint: Instead of computing the gradient from scratch, it may be easier", "margin = scores[j] - correct_class_score + 1 # note delta = 1 if", "as single float - gradient with respect to weights W; an array of", "storing the # # result in loss. # ############################################################################# # *****START OF YOUR", "dW[:,j] += 1*X[i] dW[:,y[i]] += -1*X[i] # Right now the loss is a", "correct_scores_vec = np.sum(np.where(mask, S, 0), axis=1) correct_scores = correct_scores_vec[:,np.newaxis] # broadcasting-ready vec #", "M = S - correct_scores + 1 # margins (N,C) M[mask] = 0", "derivative, # # it may be simpler to compute the derivative at the", "of y_i) mask = np.eye(W.shape[1], dtype=bool)[y] # correct scores which we'll be subtracting", "of the intermediate values that you used to compute the # # loss.", "fro self self sum of others active dpM[mask] = -1*np.sum(pMactive, axis=1) # gadiaent", "a sum over all training examples, but we want it # to be", "the same as svm_loss_naive. 
\"\"\" num_train = X.shape[0] ############################################################################# # TODO: # #", "pMactive = np.where(M>0, 1, 0) dpM += pMactive # second contributoin subtract fro", "A numpy array of shape (N, D) containing a minibatch of data. -", "easier # # to reuse some of the intermediate values that you used", "\"\"\" Structured SVM loss function, vectorized implementation. Inputs and outputs are the same", "\"\"\" Structured SVM loss function, naive implementation (with loops). Inputs have dimension D,", "time that the # # loss is being computed. As a result you", "loss function, vectorized implementation. Inputs and outputs are the same as svm_loss_naive. \"\"\"", "- correct_scores + 1 # margins (N,C) M[mask] = 0 pM = np.where(M>0,", "over all training examples, but we want it # to be an average", "reg): \"\"\" Structured SVM loss function, naive implementation (with loops). Inputs have dimension", "are C classes, and we operate on minibatches of N examples. Inputs: -", "j in range(num_classes): if j == y[i]: continue margin = scores[j] - correct_class_score", "we operate on minibatches of N examples. Inputs: - W: A numpy array", "C zeros # first contributoin (all active margins for others) pMactive = np.where(M>0,", "contributions that tells us which rows of X we # should to include", "an average instead so we divide by num_train. loss /= num_train # 1/N", "so we divide by num_train. loss /= num_train # 1/N factor in front", "the derivative, # # it may be simpler to compute the derivative at", "OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** return loss, dW def svm_loss_vectorized(W,", "SVM loss function, naive implementation (with loops). Inputs have dimension D, there are", "X.shape[0] loss = 0.0 for i in range(num_train): scores = X[i].dot(W) correct_class_score =", "dW. 
# # # # Hint: Instead of computing the gradient from scratch,", "W.shape[1])) # N x C zeros # first contributoin (all active margins for", "Structured SVM loss function, naive implementation (with loops). Inputs have dimension D, there", "compute the loss and the gradient num_classes = W.shape[1] num_train = X.shape[0] loss", "= X[i].dot(W) correct_class_score = scores[y[i]] for j in range(num_classes): if j == y[i]:", "single float - gradient with respect to weights W; an array of same", "random import shuffle from past.builtins import xrange def svm_loss_naive(W, X, y, reg): \"\"\"", "want it # to be an average instead so we divide by num_train.", "pM = np.where(M>0, M, 0) # positive marings # compute loss loss =", "CODE (DO NOT DELETE/MODIFY THIS LINE)***** return loss, dW def svm_loss_vectorized(W, X, y,", "Instead of computing the gradient from scratch, it may be easier # #", "Inputs: - W: A numpy array of shape (D, C) containing weights. -", "but we want it # to be an average instead so we divide", "X, y, reg): \"\"\" Structured SVM loss function, naive implementation (with loops). Inputs", "X we # should to include in the calculation of dW = X.T.dot(dpM)", "C classes, and we operate on minibatches of N examples. Inputs: - W:", "# loss is being computed. As a result you may need to modify", "y, reg): \"\"\" Structured SVM loss function, vectorized implementation. Inputs and outputs are", "= c means that X[i] has label c, where 0 <= c <", "y[i]: continue margin = scores[j] - correct_class_score + 1 # note delta =", "- y: A numpy array of shape (N,) containing training labels; y[i] =", "THIS LINE)***** ############################################################################# # TODO: # # Implement a vectorized version of the", "continue margin = scores[j] - correct_class_score + 1 # note delta = 1", "num_train # 1/N factor in front dW /= num_train # Add regularization to", "to modify some of the # # code above to compute the gradient.", "store it dW. 
# # Rather than first computing the loss and then", "other correct_scores_vec = np.sum(np.where(mask, S, 0), axis=1) correct_scores = correct_scores_vec[:,np.newaxis] # broadcasting-ready vec", "np.sum(pM) + reg * np.sum(W * W) # maring conributions + regularization #", "containing weights. - X: A numpy array of shape (N, D) containing a", "is being computed. As a result you may need to modify some of", "we # should to include in the calculation of dW = X.T.dot(dpM) dpM", "than first computing the loss and then computing the derivative, # # it", "Returns a tuple of: - loss as single float - gradient with respect", "for others) pMactive = np.where(M>0, 1, 0) dpM += pMactive # second contributoin", "dW def svm_loss_vectorized(W, X, y, reg): \"\"\" Structured SVM loss function, vectorized implementation.", "to be an average instead so we divide by num_train. loss /= num_train", "reg): \"\"\" Structured SVM loss function, vectorized implementation. Inputs and outputs are the", "*****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** ############################################################################# # TODO: #", "of N examples. Inputs: - W: A numpy array of shape (D, C)", "loss function, naive implementation (with loops). Inputs have dimension D, there are C", "/= num_train # Add regularization to the loss. loss += reg * np.sum(W", "1 # margins (N,C) M[mask] = 0 pM = np.where(M>0, M, 0) #", "use dpM to store two contributions that tells us which rows of X", "SVM # # loss, storing the result in dW. # # # #", "others active dpM[mask] = -1*np.sum(pMactive, axis=1) # gadiaent dW = 1.0/num_train * X.T.dot(dpM)", "means that X[i] has label c, where 0 <= c < C. -", "CODE (DO NOT DELETE/MODIFY THIS LINE)***** S = X.dot(W) # scores (N,C) #", "scores (N,C) # build mask selecting only the correct classes (one-hot encodig of", "average instead so we divide by num_train. 
loss /= num_train # 1/N factor", "scores[y[i]] for j in range(num_classes): if j == y[i]: continue margin = scores[j]", "classes, and we operate on minibatches of N examples. Inputs: - W: A", "Implement a vectorized version of the structured SVM loss, storing the # #", "numpy as np from random import shuffle from past.builtins import xrange def svm_loss_naive(W,", "label c, where 0 <= c < C. - reg: (float) regularization strength", "range(num_classes): if j == y[i]: continue margin = scores[j] - correct_class_score + 1", "CODE (DO NOT DELETE/MODIFY THIS LINE)***** dW += 2*W # *****END OF YOUR", "# # it may be simpler to compute the derivative at the same", "num_train # Add regularization to the loss. loss += reg * np.sum(W *", "- W: A numpy array of shape (D, C) containing weights. - X:", "of shape (N,) containing training labels; y[i] = c means that X[i] has", "gradient of the loss function and store it dW. # # Rather than", "you used to compute the # # loss. # ############################################################################# # *****START OF", "same time that the # # loss is being computed. As a result", "A numpy array of shape (N,) containing training labels; y[i] = c means", "that X[i] has label c, where 0 <= c < C. - reg:", "(float) regularization strength Returns a tuple of: - loss as single float -", "be an average instead so we divide by num_train. 
loss /= num_train #", "some of the intermediate values that you used to compute the # #", "# 3073 x 10 zeros # compute the loss and the gradient num_classes", "0), axis=1) correct_scores = correct_scores_vec[:,np.newaxis] # broadcasting-ready vec # compute margins M =", "# Implement a vectorized version of the gradient for the structured SVM #", "np.zeros((X.shape[0], W.shape[1])) # N x C zeros # first contributoin (all active margins", "TODO: # # Compute the gradient of the loss function and store it", "contributoin subtract fro self self sum of others active dpM[mask] = -1*np.sum(pMactive, axis=1)", "modify some of the # # code above to compute the gradient. #", "delta = 1 if margin > 0: loss += margin dW[:,j] += 1*X[i]", "version of the structured SVM loss, storing the # # result in loss.", "np from random import shuffle from past.builtins import xrange def svm_loss_naive(W, X, y,", "margins for others) pMactive = np.where(M>0, 1, 0) dpM += pMactive # second", "X[i] has label c, where 0 <= c < C. - reg: (float)", "# # loss, storing the result in dW. # # # # Hint:", "W \"\"\" dW = np.zeros(W.shape) # 3073 x 10 zeros # compute the", "Rather than first computing the loss and then computing the derivative, # #", "############################################################################# # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** S =", "all training examples, but we want it # to be an average instead", "0.0 for i in range(num_train): scores = X[i].dot(W) correct_class_score = scores[y[i]] for j", "include in the calculation of dW = X.T.dot(dpM) dpM = np.zeros((X.shape[0], W.shape[1])) #", "gradient with respect to weights W; an array of same shape as W", "3073 x 10 zeros # compute the loss and the gradient num_classes =", "YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** return loss, dW def svm_loss_vectorized(W, X,", "to compute the derivative at the same time that the # # loss", "same as svm_loss_naive. 
\"\"\" num_train = X.shape[0] ############################################################################# # TODO: # # Implement", "= np.where(M>0, 1, 0) dpM += pMactive # second contributoin subtract fro self", "We'll use dpM to store two contributions that tells us which rows of", "shape as W \"\"\" dW = np.zeros(W.shape) # 3073 x 10 zeros #", "# correct scores which we'll be subtracting from all other correct_scores_vec = np.sum(np.where(mask,", "range import numpy as np from random import shuffle from past.builtins import xrange", "the structured SVM loss, storing the # # result in loss. # #############################################################################", "of dW = X.T.dot(dpM) dpM = np.zeros((X.shape[0], W.shape[1])) # N x C zeros", "# Add regularization to the loss. loss += reg * np.sum(W * W)", "mask selecting only the correct classes (one-hot encodig of y_i) mask = np.eye(W.shape[1],", "dimension D, there are C classes, and we operate on minibatches of N", "############################################################################# # TODO: # # Implement a vectorized version of the structured SVM", "Compute the gradient of the loss function and store it dW. # #", "SVM loss function, vectorized implementation. 
Inputs and outputs are the same as svm_loss_naive.", "0) dpM += pMactive # second contributoin subtract fro self self sum of", "with respect to weights W; an array of same shape as W \"\"\"", "# ############################################################################# # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** #", "+= 2*W # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** return", "to include in the calculation of dW = X.T.dot(dpM) dpM = np.zeros((X.shape[0], W.shape[1]))", "+ regularization # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** #############################################################################", "# ############################################################################# # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** S", "W; an array of same shape as W \"\"\" dW = np.zeros(W.shape) #", "correct_class_score + 1 # note delta = 1 if margin > 0: loss", "(one-hot encodig of y_i) mask = np.eye(W.shape[1], dtype=bool)[y] # correct scores which we'll", "<= c < C. - reg: (float) regularization strength Returns a tuple of:", "result you may need to modify some of the # # code above", "active dpM[mask] = -1*np.sum(pMactive, axis=1) # gadiaent dW = 1.0/num_train * X.T.dot(dpM) +", "data. - y: A numpy array of shape (N,) containing training labels; y[i]", "correct_scores_vec[:,np.newaxis] # broadcasting-ready vec # compute margins M = S - correct_scores +", "= -1*np.sum(pMactive, axis=1) # gadiaent dW = 1.0/num_train * X.T.dot(dpM) + 2*W #", "-1*np.sum(pMactive, axis=1) # gadiaent dW = 1.0/num_train * X.T.dot(dpM) + 2*W # *****END", "< C. 
- reg: (float) regularization strength Returns a tuple of: - loss", "the correct classes (one-hot encodig of y_i) mask = np.eye(W.shape[1], dtype=bool)[y] # correct", "S, 0), axis=1) correct_scores = correct_scores_vec[:,np.newaxis] # broadcasting-ready vec # compute margins M", "and then computing the derivative, # # it may be simpler to compute", "reg * np.sum(W * W) ############################################################################# # TODO: # # Compute the gradient", "mask = np.eye(W.shape[1], dtype=bool)[y] # correct scores which we'll be subtracting from all", "+ reg * np.sum(W * W) # maring conributions + regularization # *****END", "dpM = np.zeros((X.shape[0], W.shape[1])) # N x C zeros # first contributoin (all", "to compute the gradient. # ############################################################################# # *****START OF YOUR CODE (DO NOT", "+ 1 # note delta = 1 if margin > 0: loss +=", "YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** S = X.dot(W) # scores (N,C)", "used to compute the # # loss. # ############################################################################# # *****START OF YOUR", "the same time that the # # loss is being computed. As a", "*****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** # We'll use dpM", "have dimension D, there are C classes, and we operate on minibatches of", "being computed. As a result you may need to modify some of the", "array of shape (D, C) containing weights. - X: A numpy array of", "from random import shuffle from past.builtins import xrange def svm_loss_naive(W, X, y, reg):", "minibatches of N examples. 
Inputs: - W: A numpy array of shape (D,", "for i in range(num_train): scores = X[i].dot(W) correct_class_score = scores[y[i]] for j in", "1, 0) dpM += pMactive # second contributoin subtract fro self self sum", "# # Implement a vectorized version of the structured SVM loss, storing the", "# 1/N factor in front dW /= num_train # Add regularization to the", "# build mask selecting only the correct classes (one-hot encodig of y_i) mask", "shape (N, D) containing a minibatch of data. - y: A numpy array", "CODE (DO NOT DELETE/MODIFY THIS LINE)***** # We'll use dpM to store two", "dpM += pMactive # second contributoin subtract fro self self sum of others", "on minibatches of N examples. Inputs: - W: A numpy array of shape", "version of the gradient for the structured SVM # # loss, storing the", "of the gradient for the structured SVM # # loss, storing the result", "active margins for others) pMactive = np.where(M>0, 1, 0) dpM += pMactive #", "the # # loss is being computed. As a result you may need", "As a result you may need to modify some of the # #", "are the same as svm_loss_naive. \"\"\" num_train = X.shape[0] ############################################################################# # TODO: #", "build mask selecting only the correct classes (one-hot encodig of y_i) mask =", "dW += 2*W # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****", "svm_loss_naive. \"\"\" num_train = X.shape[0] ############################################################################# # TODO: # # Implement a vectorized", "self sum of others active dpM[mask] = -1*np.sum(pMactive, axis=1) # gadiaent dW =", "the # # result in loss. # ############################################################################# # *****START OF YOUR CODE", "# loss. 
# ############################################################################# # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS", "# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** S = X.dot(W)", "storing the result in dW. # # # # Hint: Instead of computing", "result in loss. # ############################################################################# # *****START OF YOUR CODE (DO NOT DELETE/MODIFY", "all other correct_scores_vec = np.sum(np.where(mask, S, 0), axis=1) correct_scores = correct_scores_vec[:,np.newaxis] # broadcasting-ready", "scores = X[i].dot(W) correct_class_score = scores[y[i]] for j in range(num_classes): if j ==", "broadcasting-ready vec # compute margins M = S - correct_scores + 1 #", "subtracting from all other correct_scores_vec = np.sum(np.where(mask, S, 0), axis=1) correct_scores = correct_scores_vec[:,np.newaxis]", "shape (N,) containing training labels; y[i] = c means that X[i] has label", "# N x C zeros # first contributoin (all active margins for others)", "Hint: Instead of computing the gradient from scratch, it may be easier #", "scratch, it may be easier # # to reuse some of the intermediate", "1 if margin > 0: loss += margin dW[:,j] += 1*X[i] dW[:,y[i]] +=", "# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** ############################################################################# # TODO:", "D, there are C classes, and we operate on minibatches of N examples.", "svm_loss_vectorized(W, X, y, reg): \"\"\" Structured SVM loss function, vectorized implementation. Inputs and", "THIS LINE)***** S = X.dot(W) # scores (N,C) # build mask selecting only", "numpy array of shape (D, C) containing weights. - X: A numpy array", "labels; y[i] = c means that X[i] has label c, where 0 <=", "same shape as W \"\"\" dW = np.zeros(W.shape) # 3073 x 10 zeros", "loss is being computed. 
As a result you may need to modify some", "= scores[j] - correct_class_score + 1 # note delta = 1 if margin", "* np.sum(W * W) ############################################################################# # TODO: # # Compute the gradient of", "that you used to compute the # # loss. # ############################################################################# # *****START", "(with loops). Inputs have dimension D, there are C classes, and we operate", "X: A numpy array of shape (N, D) containing a minibatch of data.", "dW /= num_train # Add regularization to the loss. loss += reg *", "0) # positive marings # compute loss loss = 1.0/num_train * np.sum(pM) +", "at the same time that the # # loss is being computed. As", "from builtins import range import numpy as np from random import shuffle from", "for j in range(num_classes): if j == y[i]: continue margin = scores[j] -", "= 1.0/num_train * X.T.dot(dpM) + 2*W # *****END OF YOUR CODE (DO NOT", "margin dW[:,j] += 1*X[i] dW[:,y[i]] += -1*X[i] # Right now the loss is", "may be simpler to compute the derivative at the same time that the", "then computing the derivative, # # it may be simpler to compute the", "= np.zeros(W.shape) # 3073 x 10 zeros # compute the loss and the", "compute margins M = S - correct_scores + 1 # margins (N,C) M[mask]", "+= pMactive # second contributoin subtract fro self self sum of others active", "correct_class_score = scores[y[i]] for j in range(num_classes): if j == y[i]: continue margin", "# TODO: # # Compute the gradient of the loss function and store", "correct scores which we'll be subtracting from all other correct_scores_vec = np.sum(np.where(mask, S,", "THIS LINE)***** dW += 2*W # *****END OF YOUR CODE (DO NOT DELETE/MODIFY", "sum over all training examples, but we want it # to be an", "contributoin (all active margins for others) pMactive = np.where(M>0, 1, 0) dpM +=", "# positive marings # compute loss loss = 1.0/num_train * np.sum(pM) + reg", "CODE (DO NOT DELETE/MODIFY THIS 
LINE)***** ############################################################################# # TODO: # # Implement a", "strength Returns a tuple of: - loss as single float - gradient with", "= 1.0/num_train * np.sum(pM) + reg * np.sum(W * W) # maring conributions", "in range(num_train): scores = X[i].dot(W) correct_class_score = scores[y[i]] for j in range(num_classes): if", "# # loss is being computed. As a result you may need to", "# TODO: # # Implement a vectorized version of the structured SVM loss,", "the loss and the gradient num_classes = W.shape[1] num_train = X.shape[0] loss =", "the # # code above to compute the gradient. # ############################################################################# # *****START", "SVM loss, storing the # # result in loss. # ############################################################################# # *****START", "values that you used to compute the # # loss. # ############################################################################# #", "as W \"\"\" dW = np.zeros(W.shape) # 3073 x 10 zeros # compute", "- X: A numpy array of shape (N, D) containing a minibatch of", "selecting only the correct classes (one-hot encodig of y_i) mask = np.eye(W.shape[1], dtype=bool)[y]", "# # result in loss. # ############################################################################# # *****START OF YOUR CODE (DO", "two contributions that tells us which rows of X we # should to", "num_train = X.shape[0] loss = 0.0 for i in range(num_train): scores = X[i].dot(W)", "* W) # maring conributions + regularization # *****END OF YOUR CODE (DO", "conributions + regularization # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****", "OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** # We'll use dpM to", "NOT DELETE/MODIFY THIS LINE)***** # We'll use dpM to store two contributions that", "array of shape (N, D) containing a minibatch of data. 
- y: A", "DELETE/MODIFY THIS LINE)***** return loss, dW def svm_loss_vectorized(W, X, y, reg): \"\"\" Structured", "X.dot(W) # scores (N,C) # build mask selecting only the correct classes (one-hot", "in the calculation of dW = X.T.dot(dpM) dpM = np.zeros((X.shape[0], W.shape[1])) # N", "C. - reg: (float) regularization strength Returns a tuple of: - loss as", "structured SVM loss, storing the # # result in loss. # ############################################################################# #", "past.builtins import xrange def svm_loss_naive(W, X, y, reg): \"\"\" Structured SVM loss function,", "which rows of X we # should to include in the calculation of", "# compute the loss and the gradient num_classes = W.shape[1] num_train = X.shape[0]", "* np.sum(pM) + reg * np.sum(W * W) # maring conributions + regularization", "loss += margin dW[:,j] += 1*X[i] dW[:,y[i]] += -1*X[i] # Right now the", "vectorized version of the structured SVM loss, storing the # # result in", "operate on minibatches of N examples. Inputs: - W: A numpy array of", "and outputs are the same as svm_loss_naive. \"\"\" num_train = X.shape[0] ############################################################################# #", "of: - loss as single float - gradient with respect to weights W;", "svm_loss_naive(W, X, y, reg): \"\"\" Structured SVM loss function, naive implementation (with loops).", "loops). Inputs have dimension D, there are C classes, and we operate on", "of the loss function and store it dW. 
# # Rather than first", "# We'll use dpM to store two contributions that tells us which rows", "second contributoin subtract fro self self sum of others active dpM[mask] = -1*np.sum(pMactive,", "np.zeros(W.shape) # 3073 x 10 zeros # compute the loss and the gradient", "X[i].dot(W) correct_class_score = scores[y[i]] for j in range(num_classes): if j == y[i]: continue", "X.shape[0] ############################################################################# # TODO: # # Implement a vectorized version of the structured", "# should to include in the calculation of dW = X.T.dot(dpM) dpM =", "compute the derivative at the same time that the # # loss is", "the intermediate values that you used to compute the # # loss. #", "the gradient. # ############################################################################# # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS", "1/N factor in front dW /= num_train # Add regularization to the loss.", "encodig of y_i) mask = np.eye(W.shape[1], dtype=bool)[y] # correct scores which we'll be", "np.sum(np.where(mask, S, 0), axis=1) correct_scores = correct_scores_vec[:,np.newaxis] # broadcasting-ready vec # compute margins", "# ############################################################################# # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** dW", "array of shape (N,) containing training labels; y[i] = c means that X[i]", "S = X.dot(W) # scores (N,C) # build mask selecting only the correct", "*****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** return loss, dW def", "NOT DELETE/MODIFY THIS LINE)***** ############################################################################# # TODO: # # Implement a vectorized version", "+= -1*X[i] # Right now the loss is a sum over all training", "# to reuse some of the intermediate values that you used to compute", "rows of X we # should to include in the calculation of dW", "LINE)***** dW += 2*W # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS", "= 
correct_scores_vec[:,np.newaxis] # broadcasting-ready vec # compute margins M = S - correct_scores", "Right now the loss is a sum over all training examples, but we", "W) # maring conributions + regularization # *****END OF YOUR CODE (DO NOT", "(N, D) containing a minibatch of data. - y: A numpy array of", "vectorized version of the gradient for the structured SVM # # loss, storing", "= scores[y[i]] for j in range(num_classes): if j == y[i]: continue margin =", "the derivative at the same time that the # # loss is being", "be simpler to compute the derivative at the same time that the #", "# # Rather than first computing the loss and then computing the derivative,", "where 0 <= c < C. - reg: (float) regularization strength Returns a", "containing a minibatch of data. - y: A numpy array of shape (N,)", "N examples. Inputs: - W: A numpy array of shape (D, C) containing", "a vectorized version of the gradient for the structured SVM # # loss,", "num_train. loss /= num_train # 1/N factor in front dW /= num_train #", "0: loss += margin dW[:,j] += 1*X[i] dW[:,y[i]] += -1*X[i] # Right now", "you may need to modify some of the # # code above to", "to store two contributions that tells us which rows of X we #", "A numpy array of shape (D, C) containing weights. - X: A numpy", "note delta = 1 if margin > 0: loss += margin dW[:,j] +=", "simpler to compute the derivative at the same time that the # #", "loss, storing the result in dW. # # # # Hint: Instead of", "factor in front dW /= num_train # Add regularization to the loss. loss", "marings # compute loss loss = 1.0/num_train * np.sum(pM) + reg * np.sum(W", "and we operate on minibatches of N examples. Inputs: - W: A numpy", "regularization strength Returns a tuple of: - loss as single float - gradient", "W: A numpy array of shape (D, C) containing weights. 
- X: A", "num_classes = W.shape[1] num_train = X.shape[0] loss = 0.0 for i in range(num_train):", "we'll be subtracting from all other correct_scores_vec = np.sum(np.where(mask, S, 0), axis=1) correct_scores", "zeros # first contributoin (all active margins for others) pMactive = np.where(M>0, 1,", "\"\"\" num_train = X.shape[0] ############################################################################# # TODO: # # Implement a vectorized version", "may need to modify some of the # # code above to compute", "TODO: # # Implement a vectorized version of the gradient for the structured", "(DO NOT DELETE/MODIFY THIS LINE)***** S = X.dot(W) # scores (N,C) # build", "Structured SVM loss function, vectorized implementation. Inputs and outputs are the same as", "an array of same shape as W \"\"\" dW = np.zeros(W.shape) # 3073", "function, naive implementation (with loops). Inputs have dimension D, there are C classes,", "containing training labels; y[i] = c means that X[i] has label c, where", "x 10 zeros # compute the loss and the gradient num_classes = W.shape[1]", "the loss function and store it dW. # # Rather than first computing", "tuple of: - loss as single float - gradient with respect to weights", "we want it # to be an average instead so we divide by", "dW = np.zeros(W.shape) # 3073 x 10 zeros # compute the loss and", "(DO NOT DELETE/MODIFY THIS LINE)***** dW += 2*W # *****END OF YOUR CODE", "# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** return loss, dW", "pMactive # second contributoin subtract fro self self sum of others active dpM[mask]", "# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** # We'll use", "store two contributions that tells us which rows of X we # should", "and the gradient num_classes = W.shape[1] num_train = X.shape[0] loss = 0.0 for", "the loss. loss += reg * np.sum(W * W) ############################################################################# # TODO: #", "c, where 0 <= c < C. 
- reg: (float) regularization strength Returns", "classes (one-hot encodig of y_i) mask = np.eye(W.shape[1], dtype=bool)[y] # correct scores which", "in loss. # ############################################################################# # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS", "# broadcasting-ready vec # compute margins M = S - correct_scores + 1", "= np.sum(np.where(mask, S, 0), axis=1) correct_scores = correct_scores_vec[:,np.newaxis] # broadcasting-ready vec # compute", "(D, C) containing weights. - X: A numpy array of shape (N, D)", "loss function and store it dW. # # Rather than first computing the", "y: A numpy array of shape (N,) containing training labels; y[i] = c", "it may be simpler to compute the derivative at the same time that", "may be easier # # to reuse some of the intermediate values that", "positive marings # compute loss loss = 1.0/num_train * np.sum(pM) + reg *", "# margins (N,C) M[mask] = 0 pM = np.where(M>0, M, 0) # positive", "loss /= num_train # 1/N factor in front dW /= num_train # Add", "calculation of dW = X.T.dot(dpM) dpM = np.zeros((X.shape[0], W.shape[1])) # N x C", "X, y, reg): \"\"\" Structured SVM loss function, vectorized implementation. Inputs and outputs", "we divide by num_train. loss /= num_train # 1/N factor in front dW", "code above to compute the gradient. # ############################################################################# # *****START OF YOUR CODE", "examples, but we want it # to be an average instead so we", "NOT DELETE/MODIFY THIS LINE)***** dW += 2*W # *****END OF YOUR CODE (DO", "# scores (N,C) # build mask selecting only the correct classes (one-hot encodig", "tells us which rows of X we # should to include in the", "\"\"\" dW = np.zeros(W.shape) # 3073 x 10 zeros # compute the loss", "LINE)***** S = X.dot(W) # scores (N,C) # build mask selecting only the", "the result in dW. 
# # # # Hint: Instead of computing the", "# # # Hint: Instead of computing the gradient from scratch, it may", "computing the derivative, # # it may be simpler to compute the derivative", "DELETE/MODIFY THIS LINE)***** # We'll use dpM to store two contributions that tells", "> 0: loss += margin dW[:,j] += 1*X[i] dW[:,y[i]] += -1*X[i] # Right", "some of the # # code above to compute the gradient. # #############################################################################", "Inputs have dimension D, there are C classes, and we operate on minibatches", "of X we # should to include in the calculation of dW =", "import numpy as np from random import shuffle from past.builtins import xrange def", "+= 1*X[i] dW[:,y[i]] += -1*X[i] # Right now the loss is a sum", "2*W # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** return loss,", "regularization to the loss. loss += reg * np.sum(W * W) ############################################################################# #", "there are C classes, and we operate on minibatches of N examples. Inputs:", "gradient. # ############################################################################# # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****", "numpy array of shape (N, D) containing a minibatch of data. - y:", "to weights W; an array of same shape as W \"\"\" dW =", "others) pMactive = np.where(M>0, 1, 0) dpM += pMactive # second contributoin subtract", "# # # # Hint: Instead of computing the gradient from scratch, it", "of computing the gradient from scratch, it may be easier # # to", "only the correct classes (one-hot encodig of y_i) mask = np.eye(W.shape[1], dtype=bool)[y] #", "of the # # code above to compute the gradient. # ############################################################################# #", "compute the gradient. 
# ############################################################################# # *****START OF YOUR CODE (DO NOT DELETE/MODIFY", "= X.dot(W) # scores (N,C) # build mask selecting only the correct classes", "LINE)***** # We'll use dpM to store two contributions that tells us which", "should to include in the calculation of dW = X.T.dot(dpM) dpM = np.zeros((X.shape[0],", "def svm_loss_vectorized(W, X, y, reg): \"\"\" Structured SVM loss function, vectorized implementation. Inputs", "<gh_stars>1-10 from builtins import range import numpy as np from random import shuffle", "of the structured SVM loss, storing the # # result in loss. #", "gradient num_classes = W.shape[1] num_train = X.shape[0] loss = 0.0 for i in", "1 # note delta = 1 if margin > 0: loss += margin", "dpM to store two contributions that tells us which rows of X we", "1.0/num_train * X.T.dot(dpM) + 2*W # *****END OF YOUR CODE (DO NOT DELETE/MODIFY", "def svm_loss_naive(W, X, y, reg): \"\"\" Structured SVM loss function, naive implementation (with", "NOT DELETE/MODIFY THIS LINE)***** S = X.dot(W) # scores (N,C) # build mask", "- loss as single float - gradient with respect to weights W; an", "and store it dW. 
# # Rather than first computing the loss and", "loss as single float - gradient with respect to weights W; an array", "import range import numpy as np from random import shuffle from past.builtins import", "############################################################################# # TODO: # # Implement a vectorized version of the gradient for", "from past.builtins import xrange def svm_loss_naive(W, X, y, reg): \"\"\" Structured SVM loss", "subtract fro self self sum of others active dpM[mask] = -1*np.sum(pMactive, axis=1) #", "- reg: (float) regularization strength Returns a tuple of: - loss as single", "scores[j] - correct_class_score + 1 # note delta = 1 if margin >", "Implement a vectorized version of the gradient for the structured SVM # #", "+ 2*W # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** return", "range(num_train): scores = X[i].dot(W) correct_class_score = scores[y[i]] for j in range(num_classes): if j", "i in range(num_train): scores = X[i].dot(W) correct_class_score = scores[y[i]] for j in range(num_classes):", "= np.zeros((X.shape[0], W.shape[1])) # N x C zeros # first contributoin (all active", "dtype=bool)[y] # correct scores which we'll be subtracting from all other correct_scores_vec =", "loss. loss += reg * np.sum(W * W) ############################################################################# # TODO: # #", "shape (D, C) containing weights. - X: A numpy array of shape (N,", "implementation. Inputs and outputs are the same as svm_loss_naive. \"\"\" num_train = X.shape[0]", "of shape (N, D) containing a minibatch of data. 
- y: A numpy", "loss, dW def svm_loss_vectorized(W, X, y, reg): \"\"\" Structured SVM loss function, vectorized", "reg * np.sum(W * W) # maring conributions + regularization # *****END OF", "LINE)***** return loss, dW def svm_loss_vectorized(W, X, y, reg): \"\"\" Structured SVM loss", "YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** ############################################################################# # TODO: # # Implement", "us which rows of X we # should to include in the calculation", "+= margin dW[:,j] += 1*X[i] dW[:,y[i]] += -1*X[i] # Right now the loss", "# # loss. # ############################################################################# # *****START OF YOUR CODE (DO NOT DELETE/MODIFY", "= 1 if margin > 0: loss += margin dW[:,j] += 1*X[i] dW[:,y[i]]", "W) ############################################################################# # TODO: # # Compute the gradient of the loss function", "np.sum(W * W) # maring conributions + regularization # *****END OF YOUR CODE", "weights. - X: A numpy array of shape (N, D) containing a minibatch", "np.sum(W * W) ############################################################################# # TODO: # # Compute the gradient of the", "OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** dW += 2*W # *****END", "gadiaent dW = 1.0/num_train * X.T.dot(dpM) + 2*W # *****END OF YOUR CODE", "loss = 0.0 for i in range(num_train): scores = X[i].dot(W) correct_class_score = scores[y[i]]", "a tuple of: - loss as single float - gradient with respect to", "10 zeros # compute the loss and the gradient num_classes = W.shape[1] num_train", "*****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** S = X.dot(W) #", "loss and then computing the derivative, # # it may be simpler to", "(DO NOT DELETE/MODIFY THIS LINE)***** return loss, dW def svm_loss_vectorized(W, X, y, reg):", "in dW. 
# # # # Hint: Instead of computing the gradient from", "dW = 1.0/num_train * X.T.dot(dpM) + 2*W # *****END OF YOUR CODE (DO", "shuffle from past.builtins import xrange def svm_loss_naive(W, X, y, reg): \"\"\" Structured SVM", "1.0/num_train * np.sum(pM) + reg * np.sum(W * W) # maring conributions +", "derivative at the same time that the # # loss is being computed.", "= 0 pM = np.where(M>0, M, 0) # positive marings # compute loss", "# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** dW += 2*W", "= X.shape[0] loss = 0.0 for i in range(num_train): scores = X[i].dot(W) correct_class_score", "############################################################################# # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** dW +=", "(all active margins for others) pMactive = np.where(M>0, 1, 0) dpM += pMactive", "0 <= c < C. - reg: (float) regularization strength Returns a tuple", "# gadiaent dW = 1.0/num_train * X.T.dot(dpM) + 2*W # *****END OF YOUR", "M[mask] = 0 pM = np.where(M>0, M, 0) # positive marings # compute", "= np.eye(W.shape[1], dtype=bool)[y] # correct scores which we'll be subtracting from all other", "weights W; an array of same shape as W \"\"\" dW = np.zeros(W.shape)", "loss += reg * np.sum(W * W) ############################################################################# # TODO: # # Compute", "of others active dpM[mask] = -1*np.sum(pMactive, axis=1) # gadiaent dW = 1.0/num_train *", "# # Hint: Instead of computing the gradient from scratch, it may be", "self self sum of others active dpM[mask] = -1*np.sum(pMactive, axis=1) # gadiaent dW", "if j == y[i]: continue margin = scores[j] - correct_class_score + 1 #", "-1*X[i] # Right now the loss is a sum over all training examples,", "first contributoin (all active margins for others) pMactive = np.where(M>0, 1, 0) dpM", "minibatch of data. 
- y: A numpy array of shape (N,) containing training", "it may be easier # # to reuse some of the intermediate values", "DELETE/MODIFY THIS LINE)***** S = X.dot(W) # scores (N,C) # build mask selecting", "the gradient for the structured SVM # # loss, storing the result in", "= W.shape[1] num_train = X.shape[0] loss = 0.0 for i in range(num_train): scores", "margin > 0: loss += margin dW[:,j] += 1*X[i] dW[:,y[i]] += -1*X[i] #", "* W) ############################################################################# # TODO: # # Compute the gradient of the loss", "the loss is a sum over all training examples, but we want it", "Inputs and outputs are the same as svm_loss_naive. \"\"\" num_train = X.shape[0] #############################################################################", "# compute loss loss = 1.0/num_train * np.sum(pM) + reg * np.sum(W *", "THIS LINE)***** # We'll use dpM to store two contributions that tells us", "# Right now the loss is a sum over all training examples, but", "############################################################################# # TODO: # # Compute the gradient of the loss function and", "0 pM = np.where(M>0, M, 0) # positive marings # compute loss loss", "NOT DELETE/MODIFY THIS LINE)***** return loss, dW def svm_loss_vectorized(W, X, y, reg): \"\"\"", "X.T.dot(dpM) dpM = np.zeros((X.shape[0], W.shape[1])) # N x C zeros # first contributoin", "compute loss loss = 1.0/num_train * np.sum(pM) + reg * np.sum(W * W)", "return loss, dW def svm_loss_vectorized(W, X, y, reg): \"\"\" Structured SVM loss function,", "now the loss is a sum over all training examples, but we want", "np.where(M>0, 1, 0) dpM += pMactive # second contributoin subtract fro self self", "W.shape[1] num_train = X.shape[0] loss = 0.0 for i in range(num_train): scores =", "# Rather than first computing the loss and then computing the derivative, #", "that the # # loss is being computed. As a result you may", "loss, storing the # # result in loss. 
# ############################################################################# # *****START OF", "* X.T.dot(dpM) + 2*W # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS", "implementation (with loops). Inputs have dimension D, there are C classes, and we", "naive implementation (with loops). Inputs have dimension D, there are C classes, and", "examples. Inputs: - W: A numpy array of shape (D, C) containing weights.", "DELETE/MODIFY THIS LINE)***** ############################################################################# # TODO: # # Implement a vectorized version of", "outputs are the same as svm_loss_naive. \"\"\" num_train = X.shape[0] ############################################################################# # TODO:", "respect to weights W; an array of same shape as W \"\"\" dW", "axis=1) correct_scores = correct_scores_vec[:,np.newaxis] # broadcasting-ready vec # compute margins M = S", "computing the gradient from scratch, it may be easier # # to reuse", "import xrange def svm_loss_naive(W, X, y, reg): \"\"\" Structured SVM loss function, naive", "THIS LINE)***** return loss, dW def svm_loss_vectorized(W, X, y, reg): \"\"\" Structured SVM", "/= num_train # 1/N factor in front dW /= num_train # Add regularization", "*****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** dW += 2*W #", "axis=1) # gadiaent dW = 1.0/num_train * X.T.dot(dpM) + 2*W # *****END OF", "# # to reuse some of the intermediate values that you used to", "# to be an average instead so we divide by num_train. loss /=", "loss = 1.0/num_train * np.sum(pM) + reg * np.sum(W * W) # maring", "by num_train. loss /= num_train # 1/N factor in front dW /= num_train", "the # # loss. # ############################################################################# # *****START OF YOUR CODE (DO NOT", "loss and the gradient num_classes = W.shape[1] num_train = X.shape[0] loss = 0.0", "# Compute the gradient of the loss function and store it dW. 
#", "1*X[i] dW[:,y[i]] += -1*X[i] # Right now the loss is a sum over", "to the loss. loss += reg * np.sum(W * W) ############################################################################# # TODO:", "to reuse some of the intermediate values that you used to compute the", "computing the loss and then computing the derivative, # # it may be", "# it may be simpler to compute the derivative at the same time", "margins (N,C) M[mask] = 0 pM = np.where(M>0, M, 0) # positive marings", "result in dW. # # # # Hint: Instead of computing the gradient", "# first contributoin (all active margins for others) pMactive = np.where(M>0, 1, 0)", "array of same shape as W \"\"\" dW = np.zeros(W.shape) # 3073 x", "# result in loss. # ############################################################################# # *****START OF YOUR CODE (DO NOT", "DELETE/MODIFY THIS LINE)***** dW += 2*W # *****END OF YOUR CODE (DO NOT", "of shape (D, C) containing weights. - X: A numpy array of shape", "gradient from scratch, it may be easier # # to reuse some of", "S - correct_scores + 1 # margins (N,C) M[mask] = 0 pM =", "function, vectorized implementation. Inputs and outputs are the same as svm_loss_naive. \"\"\" num_train", "- gradient with respect to weights W; an array of same shape as", "the loss and then computing the derivative, # # it may be simpler", "* np.sum(W * W) # maring conributions + regularization # *****END OF YOUR", "j == y[i]: continue margin = scores[j] - correct_class_score + 1 # note", "has label c, where 0 <= c < C. - reg: (float) regularization", "loss. 
# ############################################################################# # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****", "LINE)***** ############################################################################# # TODO: # # Implement a vectorized version of the gradient", "== y[i]: continue margin = scores[j] - correct_class_score + 1 # note delta", "which we'll be subtracting from all other correct_scores_vec = np.sum(np.where(mask, S, 0), axis=1)", "M, 0) # positive marings # compute loss loss = 1.0/num_train * np.sum(pM)", "# code above to compute the gradient. # ############################################################################# # *****START OF YOUR", "computed. As a result you may need to modify some of the #", "(N,C) M[mask] = 0 pM = np.where(M>0, M, 0) # positive marings #", "= np.where(M>0, M, 0) # positive marings # compute loss loss = 1.0/num_train", "first computing the loss and then computing the derivative, # # it may", "dW = X.T.dot(dpM) dpM = np.zeros((X.shape[0], W.shape[1])) # N x C zeros #", "divide by num_train. loss /= num_train # 1/N factor in front dW /=", "be subtracting from all other correct_scores_vec = np.sum(np.where(mask, S, 0), axis=1) correct_scores =", "= 0.0 for i in range(num_train): scores = X[i].dot(W) correct_class_score = scores[y[i]] for", "+= reg * np.sum(W * W) ############################################################################# # TODO: # # Compute the", "gradient for the structured SVM # # loss, storing the result in dW.", "num_train = X.shape[0] ############################################################################# # TODO: # # Implement a vectorized version of", "vec # compute margins M = S - correct_scores + 1 # margins", "numpy array of shape (N,) containing training labels; y[i] = c means that", "np.where(M>0, M, 0) # positive marings # compute loss loss = 1.0/num_train *", "- correct_class_score + 1 # note delta = 1 if margin > 0:", "it dW. 
# # Rather than first computing the loss and then computing", "# # Compute the gradient of the loss function and store it dW.", "# compute margins M = S - correct_scores + 1 # margins (N,C)", "if margin > 0: loss += margin dW[:,j] += 1*X[i] dW[:,y[i]] += -1*X[i]", "= X.shape[0] ############################################################################# # TODO: # # Implement a vectorized version of the", "a vectorized version of the structured SVM loss, storing the # # result", "scores which we'll be subtracting from all other correct_scores_vec = np.sum(np.where(mask, S, 0),", "above to compute the gradient. # ############################################################################# # *****START OF YOUR CODE (DO", "# Implement a vectorized version of the structured SVM loss, storing the #", "structured SVM # # loss, storing the result in dW. # # #", "C) containing weights. - X: A numpy array of shape (N, D) containing", "front dW /= num_train # Add regularization to the loss. loss += reg", "the gradient from scratch, it may be easier # # to reuse some", "need to modify some of the # # code above to compute the", "margins M = S - correct_scores + 1 # margins (N,C) M[mask] =", "regularization # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** ############################################################################# #", "(N,C) # build mask selecting only the correct classes (one-hot encodig of y_i)", "to compute the # # loss. # ############################################################################# # *****START OF YOUR CODE", "# TODO: # # Implement a vectorized version of the gradient for the", "+ 1 # margins (N,C) M[mask] = 0 pM = np.where(M>0, M, 0)", "for the structured SVM # # loss, storing the result in dW. 
#", "y[i] = c means that X[i] has label c, where 0 <= c", "reg: (float) regularization strength Returns a tuple of: - loss as single float", "as np from random import shuffle from past.builtins import xrange def svm_loss_naive(W, X,", "c < C. - reg: (float) regularization strength Returns a tuple of: -", "vectorized implementation. Inputs and outputs are the same as svm_loss_naive. \"\"\" num_train =", "= S - correct_scores + 1 # margins (N,C) M[mask] = 0 pM", "the gradient of the loss function and store it dW. # # Rather", "intermediate values that you used to compute the # # loss. # #############################################################################", "is a sum over all training examples, but we want it # to", "the calculation of dW = X.T.dot(dpM) dpM = np.zeros((X.shape[0], W.shape[1])) # N x", "# # Implement a vectorized version of the gradient for the structured SVM", "# loss, storing the result in dW. # # # # Hint: Instead", "training labels; y[i] = c means that X[i] has label c, where 0", "# note delta = 1 if margin > 0: loss += margin dW[:,j]", "Add regularization to the loss. loss += reg * np.sum(W * W) #############################################################################", "y, reg): \"\"\" Structured SVM loss function, naive implementation (with loops). Inputs have", "from all other correct_scores_vec = np.sum(np.where(mask, S, 0), axis=1) correct_scores = correct_scores_vec[:,np.newaxis] #", "# maring conributions + regularization # *****END OF YOUR CODE (DO NOT DELETE/MODIFY" ]
[ "xml.etree.ElementTree import docutils.core def load_pairs(): # Load pairs of \"example ID, rules code\"", "import docutils.core def load_pairs(): # Load pairs of \"example ID, rules code\" for", "pairs of \"example ID, rules code\" for the test suite. rst_code = _load_rst()", "(slug, i), block.text)) return parsed def load_html(initial_header_level): # Render an HTML fragment ready", "load_html(initial_header_level): # Render an HTML fragment ready for inclusion into a page. rst_code", "i, block in enumerate(section.findall('./literal_block'), start=1): parsed.append(('%s_%d' % (slug, i), block.text)) return parsed def", "rst_code = _load_rst() xml_code = docutils.core.publish_string(rst_code, writer_name='xml') tree = xml.etree.ElementTree.fromstring(xml_code) parsed = []", "parsed.append(('%s_%d' % (slug, i), block.text)) return parsed def load_html(initial_header_level): # Render an HTML", "= _load_rst() xml_code = docutils.core.publish_string(rst_code, writer_name='xml') tree = xml.etree.ElementTree.fromstring(xml_code) parsed = [] for", "an HTML fragment ready for inclusion into a page. rst_code = _load_rst() parts", "= xml.etree.ElementTree.fromstring(xml_code) parsed = [] for section in tree.findall('./section'): slug = section.get('ids').replace('-', '_')", "[] for section in tree.findall('./section'): slug = section.get('ids').replace('-', '_') for i, block in", "the test suite. rst_code = _load_rst() xml_code = docutils.core.publish_string(rst_code, writer_name='xml') tree = xml.etree.ElementTree.fromstring(xml_code)", "start=1): parsed.append(('%s_%d' % (slug, i), block.text)) return parsed def load_html(initial_header_level): # Render an", "in tree.findall('./section'): slug = section.get('ids').replace('-', '_') for i, block in enumerate(section.findall('./literal_block'), start=1): parsed.append(('%s_%d'", "of \"example ID, rules code\" for the test suite. 
rst_code = _load_rst() xml_code", "xml.etree.ElementTree.fromstring(xml_code) parsed = [] for section in tree.findall('./section'): slug = section.get('ids').replace('-', '_') for", "for i, block in enumerate(section.findall('./literal_block'), start=1): parsed.append(('%s_%d' % (slug, i), block.text)) return parsed", "tree = xml.etree.ElementTree.fromstring(xml_code) parsed = [] for section in tree.findall('./section'): slug = section.get('ids').replace('-',", "for inclusion into a page. rst_code = _load_rst() parts = docutils.core.publish_parts( rst_code, writer_name='html',", "return parsed def load_html(initial_header_level): # Render an HTML fragment ready for inclusion into", "= _load_rst() parts = docutils.core.publish_parts( rst_code, writer_name='html', settings_overrides={'initial_header_level': initial_header_level}) return parts['fragment'] def _load_rst():", "fragment ready for inclusion into a page. rst_code = _load_rst() parts = docutils.core.publish_parts(", "= docutils.core.publish_string(rst_code, writer_name='xml') tree = xml.etree.ElementTree.fromstring(xml_code) parsed = [] for section in tree.findall('./section'):", "page. rst_code = _load_rst() parts = docutils.core.publish_parts( rst_code, writer_name='html', settings_overrides={'initial_header_level': initial_header_level}) return parts['fragment']", "section.get('ids').replace('-', '_') for i, block in enumerate(section.findall('./literal_block'), start=1): parsed.append(('%s_%d' % (slug, i), block.text))", "slug = section.get('ids').replace('-', '_') for i, block in enumerate(section.findall('./literal_block'), start=1): parsed.append(('%s_%d' % (slug,", "Load pairs of \"example ID, rules code\" for the test suite. rst_code =", "docutils.core def load_pairs(): # Load pairs of \"example ID, rules code\" for the", "pkgutil import xml.etree.ElementTree import docutils.core def load_pairs(): # Load pairs of \"example ID,", "test suite. 
rst_code = _load_rst() xml_code = docutils.core.publish_string(rst_code, writer_name='xml') tree = xml.etree.ElementTree.fromstring(xml_code) parsed", "HTML fragment ready for inclusion into a page. rst_code = _load_rst() parts =", "% (slug, i), block.text)) return parsed def load_html(initial_header_level): # Render an HTML fragment", "suite. rst_code = _load_rst() xml_code = docutils.core.publish_string(rst_code, writer_name='xml') tree = xml.etree.ElementTree.fromstring(xml_code) parsed =", "def load_html(initial_header_level): # Render an HTML fragment ready for inclusion into a page.", "parts = docutils.core.publish_parts( rst_code, writer_name='html', settings_overrides={'initial_header_level': initial_header_level}) return parts['fragment'] def _load_rst(): return pkgutil.get_data('turq',", "into a page. rst_code = _load_rst() parts = docutils.core.publish_parts( rst_code, writer_name='html', settings_overrides={'initial_header_level': initial_header_level})", "i), block.text)) return parsed def load_html(initial_header_level): # Render an HTML fragment ready for", "= docutils.core.publish_parts( rst_code, writer_name='html', settings_overrides={'initial_header_level': initial_header_level}) return parts['fragment'] def _load_rst(): return pkgutil.get_data('turq', 'examples.rst')", "for the test suite. 
rst_code = _load_rst() xml_code = docutils.core.publish_string(rst_code, writer_name='xml') tree =", "block in enumerate(section.findall('./literal_block'), start=1): parsed.append(('%s_%d' % (slug, i), block.text)) return parsed def load_html(initial_header_level):", "parsed def load_html(initial_header_level): # Render an HTML fragment ready for inclusion into a", "for section in tree.findall('./section'): slug = section.get('ids').replace('-', '_') for i, block in enumerate(section.findall('./literal_block'),", "= section.get('ids').replace('-', '_') for i, block in enumerate(section.findall('./literal_block'), start=1): parsed.append(('%s_%d' % (slug, i),", "in enumerate(section.findall('./literal_block'), start=1): parsed.append(('%s_%d' % (slug, i), block.text)) return parsed def load_html(initial_header_level): #", "ID, rules code\" for the test suite. rst_code = _load_rst() xml_code = docutils.core.publish_string(rst_code,", "import pkgutil import xml.etree.ElementTree import docutils.core def load_pairs(): # Load pairs of \"example", "parsed = [] for section in tree.findall('./section'): slug = section.get('ids').replace('-', '_') for i,", "code\" for the test suite. rst_code = _load_rst() xml_code = docutils.core.publish_string(rst_code, writer_name='xml') tree", "rules code\" for the test suite. rst_code = _load_rst() xml_code = docutils.core.publish_string(rst_code, writer_name='xml')", "def load_pairs(): # Load pairs of \"example ID, rules code\" for the test", "= [] for section in tree.findall('./section'): slug = section.get('ids').replace('-', '_') for i, block", "writer_name='xml') tree = xml.etree.ElementTree.fromstring(xml_code) parsed = [] for section in tree.findall('./section'): slug =", "xml_code = docutils.core.publish_string(rst_code, writer_name='xml') tree = xml.etree.ElementTree.fromstring(xml_code) parsed = [] for section in", "inclusion into a page. 
rst_code = _load_rst() parts = docutils.core.publish_parts( rst_code, writer_name='html', settings_overrides={'initial_header_level':", "a page. rst_code = _load_rst() parts = docutils.core.publish_parts( rst_code, writer_name='html', settings_overrides={'initial_header_level': initial_header_level}) return", "block.text)) return parsed def load_html(initial_header_level): # Render an HTML fragment ready for inclusion", "Render an HTML fragment ready for inclusion into a page. rst_code = _load_rst()", "_load_rst() xml_code = docutils.core.publish_string(rst_code, writer_name='xml') tree = xml.etree.ElementTree.fromstring(xml_code) parsed = [] for section", "rst_code = _load_rst() parts = docutils.core.publish_parts( rst_code, writer_name='html', settings_overrides={'initial_header_level': initial_header_level}) return parts['fragment'] def", "ready for inclusion into a page. rst_code = _load_rst() parts = docutils.core.publish_parts( rst_code,", "import xml.etree.ElementTree import docutils.core def load_pairs(): # Load pairs of \"example ID, rules", "\"example ID, rules code\" for the test suite. rst_code = _load_rst() xml_code =", "load_pairs(): # Load pairs of \"example ID, rules code\" for the test suite.", "docutils.core.publish_string(rst_code, writer_name='xml') tree = xml.etree.ElementTree.fromstring(xml_code) parsed = [] for section in tree.findall('./section'): slug", "# Render an HTML fragment ready for inclusion into a page. rst_code =", "# Load pairs of \"example ID, rules code\" for the test suite. 
rst_code", "_load_rst() parts = docutils.core.publish_parts( rst_code, writer_name='html', settings_overrides={'initial_header_level': initial_header_level}) return parts['fragment'] def _load_rst(): return", "section in tree.findall('./section'): slug = section.get('ids').replace('-', '_') for i, block in enumerate(section.findall('./literal_block'), start=1):", "tree.findall('./section'): slug = section.get('ids').replace('-', '_') for i, block in enumerate(section.findall('./literal_block'), start=1): parsed.append(('%s_%d' %", "'_') for i, block in enumerate(section.findall('./literal_block'), start=1): parsed.append(('%s_%d' % (slug, i), block.text)) return", "enumerate(section.findall('./literal_block'), start=1): parsed.append(('%s_%d' % (slug, i), block.text)) return parsed def load_html(initial_header_level): # Render" ]
[ "non-negative number, n. #Credits: #Special thanks to @mithmatt for adding this problem and", "description and prototypes) # Question Title and Description on leetcode.com # Function Declaration", "# :type n: int # :rtype: int # \"\"\" # Time Is Money", "\"\"\" # :type n: int # :rtype: int # \"\"\" # Time Is", "@mithmatt for adding this problem and creating all test cases. #class Solution: #", "#Count the number of prime numbers less than a non-negative number, n. #Credits:", "#class Solution: # def countPrimes(self, n): # \"\"\" # :type n: int #", "# Question Title and Description on leetcode.com # Function Declaration and Function Prototypes", "Function Declaration and Function Prototypes on leetcode.com #204. Count Primes #Description: #Count the", "all test cases. #class Solution: # def countPrimes(self, n): # \"\"\" # :type", "#Credits: #Special thanks to @mithmatt for adding this problem and creating all test", "prime numbers less than a non-negative number, n. #Credits: #Special thanks to @mithmatt", "Count Primes #Description: #Count the number of prime numbers less than a non-negative", "than a non-negative number, n. #Credits: #Special thanks to @mithmatt for adding this", "prototypes) # Question Title and Description on leetcode.com # Function Declaration and Function", "#Description: #Count the number of prime numbers less than a non-negative number, n.", "Function Prototypes on leetcode.com #204. Count Primes #Description: #Count the number of prime", "and Description on leetcode.com # Function Declaration and Function Prototypes on leetcode.com #204.", "# def countPrimes(self, n): # \"\"\" # :type n: int # :rtype: int", "countPrimes(self, n): # \"\"\" # :type n: int # :rtype: int # \"\"\"", "<gh_stars>0 # DRUNKWATER TEMPLATE(add description and prototypes) # Question Title and Description on", "and prototypes) # Question Title and Description on leetcode.com # Function Declaration and", "test cases. 
#class Solution: # def countPrimes(self, n): # \"\"\" # :type n:", "for adding this problem and creating all test cases. #class Solution: # def", "Title and Description on leetcode.com # Function Declaration and Function Prototypes on leetcode.com", "def countPrimes(self, n): # \"\"\" # :type n: int # :rtype: int #", "n): # \"\"\" # :type n: int # :rtype: int # \"\"\" #", "TEMPLATE(add description and prototypes) # Question Title and Description on leetcode.com # Function", "on leetcode.com # Function Declaration and Function Prototypes on leetcode.com #204. Count Primes", "#204. Count Primes #Description: #Count the number of prime numbers less than a", "and creating all test cases. #class Solution: # def countPrimes(self, n): # \"\"\"", "# Function Declaration and Function Prototypes on leetcode.com #204. Count Primes #Description: #Count", "leetcode.com # Function Declaration and Function Prototypes on leetcode.com #204. Count Primes #Description:", "numbers less than a non-negative number, n. #Credits: #Special thanks to @mithmatt for", "adding this problem and creating all test cases. #class Solution: # def countPrimes(self,", "cases. #class Solution: # def countPrimes(self, n): # \"\"\" # :type n: int", "number, n. #Credits: #Special thanks to @mithmatt for adding this problem and creating", "Declaration and Function Prototypes on leetcode.com #204. Count Primes #Description: #Count the number", "# DRUNKWATER TEMPLATE(add description and prototypes) # Question Title and Description on leetcode.com", "to @mithmatt for adding this problem and creating all test cases. #class Solution:", "number of prime numbers less than a non-negative number, n. #Credits: #Special thanks", "Question Title and Description on leetcode.com # Function Declaration and Function Prototypes on", "Prototypes on leetcode.com #204. Count Primes #Description: #Count the number of prime numbers", "thanks to @mithmatt for adding this problem and creating all test cases. 
#class", "Solution: # def countPrimes(self, n): # \"\"\" # :type n: int # :rtype:", "less than a non-negative number, n. #Credits: #Special thanks to @mithmatt for adding", "creating all test cases. #class Solution: # def countPrimes(self, n): # \"\"\" #", "DRUNKWATER TEMPLATE(add description and prototypes) # Question Title and Description on leetcode.com #", "leetcode.com #204. Count Primes #Description: #Count the number of prime numbers less than", "Primes #Description: #Count the number of prime numbers less than a non-negative number,", "#Special thanks to @mithmatt for adding this problem and creating all test cases.", "# \"\"\" # :type n: int # :rtype: int # \"\"\" # Time", "n. #Credits: #Special thanks to @mithmatt for adding this problem and creating all", "this problem and creating all test cases. #class Solution: # def countPrimes(self, n):", "of prime numbers less than a non-negative number, n. #Credits: #Special thanks to", "on leetcode.com #204. Count Primes #Description: #Count the number of prime numbers less", "problem and creating all test cases. #class Solution: # def countPrimes(self, n): #", "and Function Prototypes on leetcode.com #204. Count Primes #Description: #Count the number of", "Description on leetcode.com # Function Declaration and Function Prototypes on leetcode.com #204. Count", "the number of prime numbers less than a non-negative number, n. #Credits: #Special", "a non-negative number, n. #Credits: #Special thanks to @mithmatt for adding this problem" ]
[ "django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations =", "), migrations.CreateModel( name='Post', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('content',", "models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('content', models.TextField(verbose_name='内容')), ('upvotes', models.IntegerField(default=0)), ('downvotes', models.IntegerField(default=0)),", "# Generated by Django 3.2.7 on 2021-09-11 13:54 from django.conf import settings from", "name='Post', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('content', models.TextField(verbose_name='内容')), ('upvotes',", "serialize=False, verbose_name='ID')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('content', models.TextField(verbose_name='内容')), ('upvotes', models.IntegerField(default=0)), ('downvotes', models.IntegerField(default=0)), ('relpy_to', models.ForeignKey(blank=True,", "Generated by Django 3.2.7 on 2021-09-11 13:54 from django.conf import settings from django.db", "= True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Thread', fields=[", "3.2.7 on 2021-09-11 13:54 from django.conf import settings from django.db import migrations, models", "('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('content', models.TextField(verbose_name='内容')), ('upvotes', models.IntegerField(default=0)), ('downvotes', models.IntegerField(default=0)), ('relpy_to', 
models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,", "from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "migrations.CreateModel( name='Thread', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=128, verbose_name='标题')), ('pub_date', models.DateTimeField(auto_now_add=True,", "verbose_name='ID')), ('title', models.CharField(max_length=128, verbose_name='标题')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户')), ],", "migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Thread', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "] operations = [ migrations.CreateModel( name='Thread', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title',", "models.TextField(verbose_name='内容')), ('upvotes', models.IntegerField(default=0)), ('downvotes', models.IntegerField(default=0)), ('relpy_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='forum.post', verbose_name='回复')), ('thread', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('content', models.TextField(verbose_name='内容')), ('upvotes', models.IntegerField(default=0)), ('downvotes', models.IntegerField(default=0)), ('relpy_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='forum.post',", "class Migration(migrations.Migration): initial = True dependencies = [ 
migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [", "models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户')), ], ), migrations.CreateModel( name='Post', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False,", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [", "[ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Thread', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False,", "django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial", "('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户')), ], ), migrations.CreateModel( name='Post', fields=[", "('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=128, verbose_name='标题')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('user', models.ForeignKey(null=True,", "primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=128, verbose_name='标题')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL,", "models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户')), ], ), migrations.CreateModel( name='Post', fields=[ ('id',", "verbose_name='用户')), ], ), 
migrations.CreateModel( name='Post', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('pub_date', models.DateTimeField(auto_now_add=True,", "on 2021-09-11 13:54 from django.conf import settings from django.db import migrations, models import", "[ migrations.CreateModel( name='Thread', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=128, verbose_name='标题')), ('pub_date',", "import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations", "models.IntegerField(default=0)), ('relpy_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='forum.post', verbose_name='回复')), ('thread', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='forum.thread')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL,", "to='forum.post', verbose_name='回复')), ('thread', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='forum.thread')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户')), ], ), ]", "operations = [ migrations.CreateModel( name='Thread', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=128,", "('relpy_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='forum.post', verbose_name='回复')), ('thread', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='forum.thread')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL,", "verbose_name='ID')), ('pub_date', models.DateTimeField(auto_now_add=True, 
verbose_name='发布日期')), ('content', models.TextField(verbose_name='内容')), ('upvotes', models.IntegerField(default=0)), ('downvotes', models.IntegerField(default=0)), ('relpy_to', models.ForeignKey(blank=True, null=True,", "migrations.CreateModel( name='Post', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('content', models.TextField(verbose_name='内容')),", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies =", "verbose_name='发布日期')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户')), ], ), migrations.CreateModel( name='Post', fields=[ ('id', models.BigAutoField(auto_created=True,", "to=settings.AUTH_USER_MODEL, verbose_name='用户')), ], ), migrations.CreateModel( name='Post', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('pub_date',", "('upvotes', models.IntegerField(default=0)), ('downvotes', models.IntegerField(default=0)), ('relpy_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='forum.post', verbose_name='回复')), ('thread', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='forum.thread')),", "null=True, on_delete=django.db.models.deletion.SET_NULL, to='forum.post', verbose_name='回复')), ('thread', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='forum.thread')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户')), ],", "models.CharField(max_length=128, verbose_name='标题')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, 
to=settings.AUTH_USER_MODEL, verbose_name='用户')), ], ), migrations.CreateModel(", "models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='forum.post', verbose_name='回复')), ('thread', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='forum.thread')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户')),", "models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ]", "dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Thread', fields=[ ('id', models.BigAutoField(auto_created=True,", "('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户')), ], ), migrations.CreateModel( name='Post', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True,", "by Django 3.2.7 on 2021-09-11 13:54 from django.conf import settings from django.db import", "13:54 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class", "primary_key=True, serialize=False, verbose_name='ID')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('content', models.TextField(verbose_name='内容')), ('upvotes', models.IntegerField(default=0)), ('downvotes', models.IntegerField(default=0)), ('relpy_to',", "initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Thread',", "serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=128, verbose_name='标题')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, 
to=settings.AUTH_USER_MODEL, verbose_name='用户')),", "name='Thread', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=128, verbose_name='标题')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')),", "import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial =", "Django 3.2.7 on 2021-09-11 13:54 from django.conf import settings from django.db import migrations,", "True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Thread', fields=[ ('id',", "fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=128, verbose_name='标题')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('user',", "2021-09-11 13:54 from django.conf import settings from django.db import migrations, models import django.db.models.deletion", "models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=128, verbose_name='标题')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL,", "on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户')), ], ), migrations.CreateModel( name='Post', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel(", "('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('pub_date', 
models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('content', models.TextField(verbose_name='内容')), ('upvotes', models.IntegerField(default=0)), ('downvotes',", "on_delete=django.db.models.deletion.SET_NULL, to='forum.post', verbose_name='回复')), ('thread', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='forum.thread')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户')), ], ),", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies", "settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True", "models.IntegerField(default=0)), ('downvotes', models.IntegerField(default=0)), ('relpy_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='forum.post', verbose_name='回复')), ('thread', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='forum.thread')), ('user',", "('title', models.CharField(max_length=128, verbose_name='标题')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户')), ], ),", "= [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Thread', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True,", "= [ migrations.CreateModel( name='Thread', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=128, verbose_name='标题')),", "verbose_name='标题')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, 
to=settings.AUTH_USER_MODEL, verbose_name='用户')), ], ), migrations.CreateModel( name='Post',", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL),", "], ), migrations.CreateModel( name='Post', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')),", "fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发布日期')), ('content', models.TextField(verbose_name='内容')), ('upvotes', models.IntegerField(default=0)),", "('content', models.TextField(verbose_name='内容')), ('upvotes', models.IntegerField(default=0)), ('downvotes', models.IntegerField(default=0)), ('relpy_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='forum.post', verbose_name='回复')), ('thread',", "verbose_name='发布日期')), ('content', models.TextField(verbose_name='内容')), ('upvotes', models.IntegerField(default=0)), ('downvotes', models.IntegerField(default=0)), ('relpy_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='forum.post', verbose_name='回复')),", "('downvotes', models.IntegerField(default=0)), ('relpy_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='forum.post', verbose_name='回复')), ('thread', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='forum.thread')), ('user', models.ForeignKey(null=True," ]
[ "return self.TABLE.__class__.__name__ @property def id(self): return f'{self.TABLE_NAME}ID' @property def cols(self): return [{k: v}", "table \"\"\" NAMESPACE = os.getenv('ECMS_HOST') FORIEGN_KEYS = {} def __init__(self): self.TABLE = self.__class__", "\"\"\" NAMESPACE = os.getenv('ECMS_HOST') FORIEGN_KEYS = {} def __init__(self): self.TABLE = self.__class__ self.TABLE_NAME", "__init__(self): self.TABLE = self.__class__ self.TABLE_NAME = self.__class__.__name__ @property def namespace(self): return self.NAMESPACE @property", "def cols(self): return [{k: v} for k, v in self.TABLE.__dict__.items() if '__' not", "in k] @property def column_names(self): return [col for cols in self.cols for col,", "@property def table(self): return self.TABLE.__class__.__name__ @property def id(self): return f'{self.TABLE_NAME}ID' @property def cols(self):", "def table(self): return self.TABLE.__class__.__name__ @property def id(self): return f'{self.TABLE_NAME}ID' @property def cols(self): return", "Mixin class to get quick properties from the table \"\"\" NAMESPACE = os.getenv('ECMS_HOST')", "@property def cols(self): return [{k: v} for k, v in self.TABLE.__dict__.items() if '__'", "= self.__class__ self.TABLE_NAME = self.__class__.__name__ @property def namespace(self): return self.NAMESPACE @property def table(self):", "import os __all__ = ['TableMixin'] class TableMixin: \"\"\" Table Mixin class to get", "table(self): return self.TABLE.__class__.__name__ @property def id(self): return f'{self.TABLE_NAME}ID' @property def cols(self): return [{k:", "<reponame>Poseidon-Dev/ecms-api<filename>src/EcmsApi/tables/_base.py import os __all__ = ['TableMixin'] class TableMixin: \"\"\" Table Mixin class to", "os __all__ = ['TableMixin'] class TableMixin: \"\"\" Table Mixin class to get quick", "self.__class__.__name__ @property def namespace(self): return self.NAMESPACE @property def table(self): return self.TABLE.__class__.__name__ @property def", "@property def id(self): return 
f'{self.TABLE_NAME}ID' @property def cols(self): return [{k: v} for k,", "v in self.TABLE.__dict__.items() if '__' not in k] @property def column_names(self): return [col", "[{k: v} for k, v in self.TABLE.__dict__.items() if '__' not in k] @property", "FORIEGN_KEYS = {} def __init__(self): self.TABLE = self.__class__ self.TABLE_NAME = self.__class__.__name__ @property def", "namespace(self): return self.NAMESPACE @property def table(self): return self.TABLE.__class__.__name__ @property def id(self): return f'{self.TABLE_NAME}ID'", "= {} def __init__(self): self.TABLE = self.__class__ self.TABLE_NAME = self.__class__.__name__ @property def namespace(self):", "['TableMixin'] class TableMixin: \"\"\" Table Mixin class to get quick properties from the", "return f'{self.TABLE_NAME}ID' @property def cols(self): return [{k: v} for k, v in self.TABLE.__dict__.items()", "'__' not in k] @property def column_names(self): return [col for cols in self.cols", "class to get quick properties from the table \"\"\" NAMESPACE = os.getenv('ECMS_HOST') FORIEGN_KEYS", "= ['TableMixin'] class TableMixin: \"\"\" Table Mixin class to get quick properties from", "return self.NAMESPACE @property def table(self): return self.TABLE.__class__.__name__ @property def id(self): return f'{self.TABLE_NAME}ID' @property", "self.TABLE.__dict__.items() if '__' not in k] @property def column_names(self): return [col for cols", "not in k] @property def column_names(self): return [col for cols in self.cols for", "@property def column_names(self): return [col for cols in self.cols for col, _ in", "id(self): return f'{self.TABLE_NAME}ID' @property def cols(self): return [{k: v} for k, v in", "def __init__(self): self.TABLE = self.__class__ self.TABLE_NAME = self.__class__.__name__ @property def namespace(self): return self.NAMESPACE", "self.TABLE_NAME = self.__class__.__name__ @property def namespace(self): return self.NAMESPACE @property def table(self): return self.TABLE.__class__.__name__", 
"self.NAMESPACE @property def table(self): return self.TABLE.__class__.__name__ @property def id(self): return f'{self.TABLE_NAME}ID' @property def", "in self.TABLE.__dict__.items() if '__' not in k] @property def column_names(self): return [col for", "to get quick properties from the table \"\"\" NAMESPACE = os.getenv('ECMS_HOST') FORIEGN_KEYS =", "= self.__class__.__name__ @property def namespace(self): return self.NAMESPACE @property def table(self): return self.TABLE.__class__.__name__ @property", "= os.getenv('ECMS_HOST') FORIEGN_KEYS = {} def __init__(self): self.TABLE = self.__class__ self.TABLE_NAME = self.__class__.__name__", "__all__ = ['TableMixin'] class TableMixin: \"\"\" Table Mixin class to get quick properties", "the table \"\"\" NAMESPACE = os.getenv('ECMS_HOST') FORIEGN_KEYS = {} def __init__(self): self.TABLE =", "self.TABLE = self.__class__ self.TABLE_NAME = self.__class__.__name__ @property def namespace(self): return self.NAMESPACE @property def", "self.__class__ self.TABLE_NAME = self.__class__.__name__ @property def namespace(self): return self.NAMESPACE @property def table(self): return", "cols(self): return [{k: v} for k, v in self.TABLE.__dict__.items() if '__' not in", "properties from the table \"\"\" NAMESPACE = os.getenv('ECMS_HOST') FORIEGN_KEYS = {} def __init__(self):", "def namespace(self): return self.NAMESPACE @property def table(self): return self.TABLE.__class__.__name__ @property def id(self): return", "from the table \"\"\" NAMESPACE = os.getenv('ECMS_HOST') FORIEGN_KEYS = {} def __init__(self): self.TABLE", "{} def __init__(self): self.TABLE = self.__class__ self.TABLE_NAME = self.__class__.__name__ @property def namespace(self): return", "\"\"\" Table Mixin class to get quick properties from the table \"\"\" NAMESPACE", "quick properties from the table \"\"\" NAMESPACE = os.getenv('ECMS_HOST') FORIEGN_KEYS = {} def", "TableMixin: \"\"\" Table Mixin class to get quick properties from the table \"\"\"", "@property def 
namespace(self): return self.NAMESPACE @property def table(self): return self.TABLE.__class__.__name__ @property def id(self):", "get quick properties from the table \"\"\" NAMESPACE = os.getenv('ECMS_HOST') FORIEGN_KEYS = {}", "Table Mixin class to get quick properties from the table \"\"\" NAMESPACE =", "class TableMixin: \"\"\" Table Mixin class to get quick properties from the table", "def id(self): return f'{self.TABLE_NAME}ID' @property def cols(self): return [{k: v} for k, v", "os.getenv('ECMS_HOST') FORIEGN_KEYS = {} def __init__(self): self.TABLE = self.__class__ self.TABLE_NAME = self.__class__.__name__ @property", "for k, v in self.TABLE.__dict__.items() if '__' not in k] @property def column_names(self):", "k] @property def column_names(self): return [col for cols in self.cols for col, _", "f'{self.TABLE_NAME}ID' @property def cols(self): return [{k: v} for k, v in self.TABLE.__dict__.items() if", "def column_names(self): return [col for cols in self.cols for col, _ in cols.items()]", "v} for k, v in self.TABLE.__dict__.items() if '__' not in k] @property def", "if '__' not in k] @property def column_names(self): return [col for cols in", "self.TABLE.__class__.__name__ @property def id(self): return f'{self.TABLE_NAME}ID' @property def cols(self): return [{k: v} for", "k, v in self.TABLE.__dict__.items() if '__' not in k] @property def column_names(self): return", "NAMESPACE = os.getenv('ECMS_HOST') FORIEGN_KEYS = {} def __init__(self): self.TABLE = self.__class__ self.TABLE_NAME =", "return [{k: v} for k, v in self.TABLE.__dict__.items() if '__' not in k]" ]
[ "-> int: res = dict() s = 0 ans = 0 for i,", "res[s] = i if s - 1 in res: ans = max(ans, i-res[s-1])", "int: res = dict() s = 0 ans = 0 for i, c", "if s not in res: res[s] = i if s - 1 in", "hours: List[int]) -> int: res = dict() s = 0 ans = 0", "= 0 for i, c in enumerate(hours): s += 1 if c >", "0 for i, c in enumerate(hours): s += 1 if c > 8", "i if s - 1 in res: ans = max(ans, i-res[s-1]) return ans", "i, c in enumerate(hours): s += 1 if c > 8 else -1", "+ 1 if s not in res: res[s] = i if s -", "Solution: def longestWPI(self, hours: List[int]) -> int: res = dict() s = 0", "s > 0: ans = i + 1 if s not in res:", "1 if s not in res: res[s] = i if s - 1", "in res: res[s] = i if s - 1 in res: ans =", "0 ans = 0 for i, c in enumerate(hours): s += 1 if", "List[int]) -> int: res = dict() s = 0 ans = 0 for", "not in res: res[s] = i if s - 1 in res: ans", "for i, c in enumerate(hours): s += 1 if c > 8 else", "+= 1 if c > 8 else -1 if s > 0: ans", "s += 1 if c > 8 else -1 if s > 0:", "class Solution: def longestWPI(self, hours: List[int]) -> int: res = dict() s =", "ans = 0 for i, c in enumerate(hours): s += 1 if c", "1 if c > 8 else -1 if s > 0: ans =", "> 8 else -1 if s > 0: ans = i + 1", "else -1 if s > 0: ans = i + 1 if s", "c in enumerate(hours): s += 1 if c > 8 else -1 if", "if c > 8 else -1 if s > 0: ans = i", "res = dict() s = 0 ans = 0 for i, c in", "> 0: ans = i + 1 if s not in res: res[s]", "res: res[s] = i if s - 1 in res: ans = max(ans,", "= i + 1 if s not in res: res[s] = i if", "8 else -1 if s > 0: ans = i + 1 if", "-1 if s > 0: ans = i + 1 if s not", "s not in res: res[s] = i if s - 1 in res:", "s = 0 ans = 0 for i, c in enumerate(hours): s +=", "= dict() s = 0 ans = 0 for i, c in enumerate(hours):", "0: ans = i + 1 if s not in res: res[s] =", "dict() s = 0 ans = 0 for i, c in enumerate(hours): s", "def longestWPI(self, hours: List[int]) -> int: res = dict() s = 0 ans", "= 0 ans = 0 for i, c in enumerate(hours): s += 1", "if s > 0: ans = i + 1 if s not 
in", "= i if s - 1 in res: ans = max(ans, i-res[s-1]) return", "i + 1 if s not in res: res[s] = i if s", "c > 8 else -1 if s > 0: ans = i +", "longestWPI(self, hours: List[int]) -> int: res = dict() s = 0 ans =", "enumerate(hours): s += 1 if c > 8 else -1 if s >", "in enumerate(hours): s += 1 if c > 8 else -1 if s", "ans = i + 1 if s not in res: res[s] = i" ]
[ "return self._name @property def power(self): \"\"\"Power reading of CT clamp in W\"\"\" return", "= value @property def name(self): \"\"\"Name of CT clamp\"\"\" return self._name @property def", "= name self._value = value @property def name(self): \"\"\"Name of CT clamp\"\"\" return", "-> None: self._name = name self._value = value @property def name(self): \"\"\"Name of", "def name(self): \"\"\"Name of CT clamp\"\"\" return self._name @property def power(self): \"\"\"Power reading", "def __init__(self, name, value) -> None: self._name = name self._value = value @property", "CT clamp\"\"\" return self._name @property def power(self): \"\"\"Power reading of CT clamp in", "self._value = value @property def name(self): \"\"\"Name of CT clamp\"\"\" return self._name @property", "__init__(self, name, value) -> None: self._name = name self._value = value @property def", "None: self._name = name self._value = value @property def name(self): \"\"\"Name of CT", "@property def name(self): \"\"\"Name of CT clamp\"\"\" return self._name @property def power(self): \"\"\"Power", "clamp\"\"\" return self._name @property def power(self): \"\"\"Power reading of CT clamp in W\"\"\"", "self._name @property def power(self): \"\"\"Power reading of CT clamp in W\"\"\" return self._value", "of CT clamp\"\"\" return self._name @property def power(self): \"\"\"Power reading of CT clamp", "value @property def name(self): \"\"\"Name of CT clamp\"\"\" return self._name @property def power(self):", "<reponame>CJNE/pymyenergi class CT: def __init__(self, name, value) -> None: self._name = name self._value", "value) -> None: self._name = name self._value = value @property def name(self): \"\"\"Name", "name(self): \"\"\"Name of CT clamp\"\"\" return self._name @property def power(self): \"\"\"Power reading of", "self._name = name self._value = value @property def name(self): \"\"\"Name of CT clamp\"\"\"", "name self._value = value @property def name(self): \"\"\"Name of CT clamp\"\"\" return 
self._name", "\"\"\"Name of CT clamp\"\"\" return self._name @property def power(self): \"\"\"Power reading of CT", "name, value) -> None: self._name = name self._value = value @property def name(self):", "CT: def __init__(self, name, value) -> None: self._name = name self._value = value", "class CT: def __init__(self, name, value) -> None: self._name = name self._value =" ]
[ "File Dir” # 在当前文件夹下将hello_world.txt文件复制为hello_world_bak.txt src = r'hello_world.txt' dst = r'hello_world_bak.txt' import shutil shutil.copyfile(src,", "# 先确认在VSCode的Settings中,勾选“Terminal:Excute In File Dir” # 在当前文件夹下将hello_world.txt文件复制为hello_world_bak.txt src = r'hello_world.txt' dst = r'hello_world_bak.txt'", "In File Dir” # 在当前文件夹下将hello_world.txt文件复制为hello_world_bak.txt src = r'hello_world.txt' dst = r'hello_world_bak.txt' import shutil", "Dir” # 在当前文件夹下将hello_world.txt文件复制为hello_world_bak.txt src = r'hello_world.txt' dst = r'hello_world_bak.txt' import shutil shutil.copyfile(src, dst)", "先确认在VSCode的Settings中,勾选“Terminal:Excute In File Dir” # 在当前文件夹下将hello_world.txt文件复制为hello_world_bak.txt src = r'hello_world.txt' dst = r'hello_world_bak.txt' import" ]
[ "\"zone\": \"global\", }, {\"status\": RequestStatus.FAILED, \"error\": \"stream ended\"}, ) except Exception as e:", "rois = [] lines = [] for region in video_info.regions: points = []", "parser.add_argument( \"--debug\", default=False, type=lambda x: (str(x).lower() == \"true\") ) args = parser.parse_args() args.video_uuid", "15 mins INFER_URL = f\"{args.nxs_url}/api/v2/tasks/tensors/infer\" OBJECT_DETECTOR_UUID = args.object_detector_uuid TRACKER_UUID = args.tracker_uuid cap =", "App\") parser.add_argument(\"--video_uuid\", type=str) parser.add_argument(\"--nxs_url\", type=str) parser.add_argument(\"--nxs_api_key\", type=str) parser.add_argument( \"--object_detector_uuid\", type=str, default=\"bbff897256c9431eb19a2ad311749b39\", ) parser.add_argument(", "InDbTrackingAppRequest, RequestStatus, ) from apps.vehicle_counting.worker.utils import * DB_TASKS_COLLECTION_NAME = \"tasks\" DB_COUNTS_COLLECTION_NAME = \"counts\"", "\"tasks\" DB_COUNTS_COLLECTION_NAME = \"counts\" DB_LOGS_COLLECTION_NAME = \"logs\" STORAGE_LOGS_DIR_PATH = \"logs\" def main(): import", "video_url=video_info.video_url, rois=rois, lines=lines, tracking_classes=video_info.tracking_classes, visualize=False, collect_logs=args.debug, skip_frame=video_info.skip_frames, blobstore_conn_str=args.blobstore_conn_str, blobstore_container_name=args.blobstore_container, cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name, counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration,", ") app.run_tracking() db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) if app.job_completed: db_client.update( DB_TASKS_COLLECTION_NAME,", "y=int(line.p1.y * frame_height), ), ) ) if \".m3u8\" not in video_info.video_url: from apps.vehicle_counting.worker.online_worker", "job_duration=video_info.job_duration, ) app.run_tracking() db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, 
uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) if app.job_completed: db_client.update(", ") if \".m3u8\" not in video_info.video_url: from apps.vehicle_counting.worker.online_worker import ( OnlineVehicleTrackingApp, ) app", "db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.COMPLETED, \"error\": \"\"}, )", "uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) if app.job_completed: db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", },", "* frame_width), y=int(line.p0.y * frame_height), ), p1=NxsPoint( x=int(line.p1.x * frame_width), y=int(line.p1.y * frame_height),", "\"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.RUNNING}, ) video_info = InDbTrackingAppRequest( **db_client.query( DB_TASKS_COLLECTION_NAME,", "cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name, counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) else: from apps.vehicle_counting.worker.offline_worker import ( OfflineVehicleTrackingApp, ) app", "}, {\"status\": RequestStatus.FAILED, \"error\": \"stream ended\"}, ) except Exception as e: print(e) db_client", "OnlineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height, frame_rate=frame_rate, nxs_infer_url=INFER_URL, nxs_api_key=args.nxs_api_key, detector_uuid=OBJECT_DETECTOR_UUID, tracker_uuid=TRACKER_UUID, video_url=video_info.video_url, rois=rois, lines=lines, tracking_classes=video_info.tracking_classes,", "= int(round(cap.get(cv2.CAP_PROP_FPS))) cap.release() rois = [] lines = [] for region in video_info.regions:", "\".m3u8\" not in video_info.video_url: from apps.vehicle_counting.worker.online_worker import ( OnlineVehicleTrackingApp, ) app = OnlineVehicleTrackingApp(", "DB_COUNTS_COLLECTION_NAME = \"counts\" 
DB_LOGS_COLLECTION_NAME = \"logs\" STORAGE_LOGS_DIR_PATH = \"logs\" def main(): import argparse", "= os.environ[\"COSMOSDB_NAME\"] try: db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) db_client.update( DB_TASKS_COLLECTION_NAME, {", "import NxsDbFactory, NxsDbType from apps.vehicle_counting.app_types.app_request import ( InDbTrackingAppRequest, RequestStatus, ) from apps.vehicle_counting.worker.utils import", "DB_TASKS_COLLECTION_NAME = \"tasks\" DB_COUNTS_COLLECTION_NAME = \"counts\" DB_LOGS_COLLECTION_NAME = \"logs\" STORAGE_LOGS_DIR_PATH = \"logs\" def", "import ( OfflineVehicleTrackingApp, ) app = OfflineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height, frame_rate=frame_rate, nxs_infer_url=INFER_URL, nxs_api_key=args.nxs_api_key,", "cosmosdb_db_name=args.cosmosdb_db_name, counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) app.run_tracking() db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) if", "frame_width), int(p.y * frame_height), ) ) rois.append(NxsRoi(points=points)) line = region.line lines.append( NxsLine( p0=NxsPoint(", "OfflineVehicleTrackingApp, ) app = OfflineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height, frame_rate=frame_rate, nxs_infer_url=INFER_URL, nxs_api_key=args.nxs_api_key, detector_uuid=OBJECT_DETECTOR_UUID, tracker_uuid=TRACKER_UUID,", "STORAGE_LOGS_DIR_PATH = \"logs\" def main(): import argparse parser = argparse.ArgumentParser(description=\"Vehicle Counting App\") parser.add_argument(\"--video_uuid\",", "cap = cv2.VideoCapture(video_info.video_url) frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) frame_rate = int(round(cap.get(cv2.CAP_PROP_FPS))) cap.release()", "lines = [] for region in 
video_info.regions: points = [] for p in", "RequestStatus.COMPLETED, \"error\": \"\"}, ) else: db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", },", "f\"{args.nxs_url}/api/v2/tasks/tensors/infer\" OBJECT_DETECTOR_UUID = args.object_detector_uuid TRACKER_UUID = args.tracker_uuid cap = cv2.VideoCapture(video_info.video_url) frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)", "{\"status\": RequestStatus.FAILED, \"error\": \"stream ended\"}, ) except Exception as e: print(e) db_client =", "NxsPoint( int(p.x * frame_width), int(p.y * frame_height), ) ) rois.append(NxsRoi(points=points)) line = region.line", "p in region.roi.points: points.append( NxsPoint( int(p.x * frame_width), int(p.y * frame_height), ) )", ") ) rois.append(NxsRoi(points=points)) line = region.line lines.append( NxsLine( p0=NxsPoint( x=int(line.p0.x * frame_width), y=int(line.p0.y", "\"error\": \"stream ended\"}, ) except Exception as e: print(e) db_client = NxsDbFactory.create_db( NxsDbType.MONGODB,", "cap.release() rois = [] lines = [] for region in video_info.regions: points =", ") if app.job_completed: db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.COMPLETED,", "os.environ[\"BLOBSTORE_CONTAINER\"] args.cosmosdb_conn_str = os.environ[\"COSMOSDB_URL\"] args.cosmosdb_db_name = os.environ[\"COSMOSDB_NAME\"] try: db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str,", "type=str) parser.add_argument(\"--nxs_url\", type=str) parser.add_argument(\"--nxs_api_key\", type=str) parser.add_argument( \"--object_detector_uuid\", type=str, default=\"bbff897256c9431eb19a2ad311749b39\", ) parser.add_argument( \"--tracker_uuid\", type=str,", "from apps.vehicle_counting.app_types.app_request import ( InDbTrackingAppRequest, RequestStatus, ) from apps.vehicle_counting.worker.utils import * DB_TASKS_COLLECTION_NAME =", "lines=lines, 
tracking_classes=video_info.tracking_classes, visualize=False, collect_logs=args.debug, skip_frame=video_info.skip_frames, blobstore_conn_str=args.blobstore_conn_str, blobstore_container_name=args.blobstore_container, cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name, counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) app.run_tracking()", "rois.append(NxsRoi(points=points)) line = region.line lines.append( NxsLine( p0=NxsPoint( x=int(line.p0.x * frame_width), y=int(line.p0.y * frame_height),", "{ \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.RUNNING}, ) video_info = InDbTrackingAppRequest( **db_client.query(", "apps.vehicle_counting.worker.online_worker import ( OnlineVehicleTrackingApp, ) app = OnlineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height, frame_rate=frame_rate, nxs_infer_url=INFER_URL,", "os import cv2 from nxs_libs.db import NxsDbFactory, NxsDbType from apps.vehicle_counting.app_types.app_request import ( InDbTrackingAppRequest,", "cv2.VideoCapture(video_info.video_url) frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) frame_rate = int(round(cap.get(cv2.CAP_PROP_FPS))) cap.release() rois =", "region in video_info.regions: points = [] for p in region.roi.points: points.append( NxsPoint( int(p.x", "os.environ[\"BLOBSTORE_CONN_STR\"] args.blobstore_container = os.environ[\"BLOBSTORE_CONTAINER\"] args.cosmosdb_conn_str = os.environ[\"COSMOSDB_URL\"] args.cosmosdb_db_name = os.environ[\"COSMOSDB_NAME\"] try: db_client =", "NxsDbFactory, NxsDbType from apps.vehicle_counting.app_types.app_request import ( InDbTrackingAppRequest, RequestStatus, ) from apps.vehicle_counting.worker.utils import *", "parser.add_argument(\"--nxs_url\", type=str) parser.add_argument(\"--nxs_api_key\", type=str) parser.add_argument( \"--object_detector_uuid\", 
type=str, default=\"bbff897256c9431eb19a2ad311749b39\", ) parser.add_argument( \"--tracker_uuid\", type=str, default=\"451ffc2ee1594fe2a6ace17fca5117ab\",", "points.append( NxsPoint( int(p.x * frame_width), int(p.y * frame_height), ) ) rois.append(NxsRoi(points=points)) line =", "blobstore_container_name=args.blobstore_container, cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name, counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) else: from apps.vehicle_counting.worker.offline_worker import ( OfflineVehicleTrackingApp, )", "\"zone\": \"global\", }, {\"status\": RequestStatus.FAILED, \"error\": str(e)}, ) if __name__ == \"__main__\": main()", "cap.get(cv2.CAP_PROP_FRAME_WIDTH) frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) frame_rate = int(round(cap.get(cv2.CAP_PROP_FPS))) cap.release() rois = [] lines =", "args.nxs_url = os.environ[\"NXS_URL\"] args.nxs_api_key = os.environ[\"NXS_API_KEY\"] args.blobstore_conn_str = os.environ[\"BLOBSTORE_CONN_STR\"] args.blobstore_container = os.environ[\"BLOBSTORE_CONTAINER\"] args.cosmosdb_conn_str", "= cv2.VideoCapture(video_info.video_url) frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) frame_rate = int(round(cap.get(cv2.CAP_PROP_FPS))) cap.release() rois", "args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.RUNNING}, ) video_info = InDbTrackingAppRequest( **db_client.query( DB_TASKS_COLLECTION_NAME, {\"video_uuid\":", "nxs_infer_url=INFER_URL, nxs_api_key=args.nxs_api_key, detector_uuid=OBJECT_DETECTOR_UUID, tracker_uuid=TRACKER_UUID, video_url=video_info.video_url, rois=rois, lines=lines, tracking_classes=video_info.tracking_classes, visualize=False, collect_logs=args.debug, skip_frame=video_info.skip_frames, blobstore_conn_str=args.blobstore_conn_str, blobstore_container_name=args.blobstore_container,", "db_name=args.cosmosdb_db_name, ) db_client.update( 
DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.RUNNING}, )", "DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.FAILED, \"error\": \"stream ended\"}, )", "DB_LOGS_COLLECTION_NAME = \"logs\" STORAGE_LOGS_DIR_PATH = \"logs\" def main(): import argparse parser = argparse.ArgumentParser(description=\"Vehicle", "frame_width=frame_width, frame_height=frame_height, frame_rate=frame_rate, nxs_infer_url=INFER_URL, nxs_api_key=args.nxs_api_key, detector_uuid=OBJECT_DETECTOR_UUID, tracker_uuid=TRACKER_UUID, video_url=video_info.video_url, rois=rois, lines=lines, tracking_classes=video_info.tracking_classes, visualize=False, collect_logs=args.debug,", "argparse.ArgumentParser(description=\"Vehicle Counting App\") parser.add_argument(\"--video_uuid\", type=str) parser.add_argument(\"--nxs_url\", type=str) parser.add_argument(\"--nxs_api_key\", type=str) parser.add_argument( \"--object_detector_uuid\", type=str, default=\"bbff897256c9431eb19a2ad311749b39\",", ")[0] ) if video_info.skip_frames is None: video_info.skip_frames = 3 if video_info.count_interval_secs is None:", "), p1=NxsPoint( x=int(line.p1.x * frame_width), y=int(line.p1.y * frame_height), ), ) ) if \".m3u8\"", "args.object_detector_uuid TRACKER_UUID = args.tracker_uuid cap = cv2.VideoCapture(video_info.video_url) frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)", "type=str, default=\"451ffc2ee1594fe2a6ace17fca5117ab\", ) parser.add_argument(\"--blobstore_conn_str\", type=str) parser.add_argument(\"--blobstore_container\", type=str) parser.add_argument(\"--cosmosdb_conn_str\", type=str) parser.add_argument(\"--cosmosdb_db_name\", type=str) parser.add_argument( \"--debug\",", "**db_client.query( DB_TASKS_COLLECTION_NAME, {\"video_uuid\": args.video_uuid} )[0] ) if video_info.skip_frames is None: video_info.skip_frames = 3", 
"<filename>apps/vehicle_counting/worker/worker.py import os import cv2 from nxs_libs.db import NxsDbFactory, NxsDbType from apps.vehicle_counting.app_types.app_request import", "default=False, type=lambda x: (str(x).lower() == \"true\") ) args = parser.parse_args() args.video_uuid = os.environ[\"VIDEO_UUID\"]", "as e: print(e) db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) db_client.update( DB_TASKS_COLLECTION_NAME, {", "NxsDbType from apps.vehicle_counting.app_types.app_request import ( InDbTrackingAppRequest, RequestStatus, ) from apps.vehicle_counting.worker.utils import * DB_TASKS_COLLECTION_NAME", "= os.environ[\"NXS_URL\"] args.nxs_api_key = os.environ[\"NXS_API_KEY\"] args.blobstore_conn_str = os.environ[\"BLOBSTORE_CONN_STR\"] args.blobstore_container = os.environ[\"BLOBSTORE_CONTAINER\"] args.cosmosdb_conn_str =", "{ \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.FAILED, \"error\": str(e)}, ) if __name__", "if app.job_completed: db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.COMPLETED, \"error\":", "db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) if app.job_completed: db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\":", "frame_rate=frame_rate, nxs_infer_url=INFER_URL, nxs_api_key=args.nxs_api_key, detector_uuid=OBJECT_DETECTOR_UUID, tracker_uuid=TRACKER_UUID, video_url=video_info.video_url, rois=rois, lines=lines, tracking_classes=video_info.tracking_classes, visualize=False, collect_logs=args.debug, skip_frame=video_info.skip_frames, blobstore_conn_str=args.blobstore_conn_str,", "args.blobstore_container = os.environ[\"BLOBSTORE_CONTAINER\"] args.cosmosdb_conn_str = os.environ[\"COSMOSDB_URL\"] args.cosmosdb_db_name = os.environ[\"COSMOSDB_NAME\"] try: db_client = NxsDbFactory.create_db(", 
") except Exception as e: print(e) db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, )", "* frame_height), ), ) ) if \".m3u8\" not in video_info.video_url: from apps.vehicle_counting.worker.online_worker import", "cap.get(cv2.CAP_PROP_FRAME_HEIGHT) frame_rate = int(round(cap.get(cv2.CAP_PROP_FPS))) cap.release() rois = [] lines = [] for region", "frame_width), y=int(line.p0.y * frame_height), ), p1=NxsPoint( x=int(line.p1.x * frame_width), y=int(line.p1.y * frame_height), ),", "parser.add_argument(\"--cosmosdb_db_name\", type=str) parser.add_argument( \"--debug\", default=False, type=lambda x: (str(x).lower() == \"true\") ) args =", "uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.RUNNING},", "= cap.get(cv2.CAP_PROP_FRAME_HEIGHT) frame_rate = int(round(cap.get(cv2.CAP_PROP_FPS))) cap.release() rois = [] lines = [] for", "is None: video_info.skip_frames = 3 if video_info.count_interval_secs is None: video_info.count_interval_secs = 900 #", "\"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.COMPLETED, \"error\": \"\"}, ) else: db_client.update( DB_TASKS_COLLECTION_NAME,", "{ \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.COMPLETED, \"error\": \"\"}, ) else: db_client.update(", "= os.environ[\"BLOBSTORE_CONN_STR\"] args.blobstore_container = os.environ[\"BLOBSTORE_CONTAINER\"] args.cosmosdb_conn_str = os.environ[\"COSMOSDB_URL\"] args.cosmosdb_db_name = os.environ[\"COSMOSDB_NAME\"] try: db_client", "args.blobstore_conn_str = os.environ[\"BLOBSTORE_CONN_STR\"] args.blobstore_container = os.environ[\"BLOBSTORE_CONTAINER\"] args.cosmosdb_conn_str = os.environ[\"COSMOSDB_URL\"] args.cosmosdb_db_name = os.environ[\"COSMOSDB_NAME\"] try:", "frame_width), y=int(line.p1.y * frame_height), ), ) ) if 
\".m3u8\" not in video_info.video_url: from", "= args.tracker_uuid cap = cv2.VideoCapture(video_info.video_url) frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) frame_rate =", "}, {\"status\": RequestStatus.COMPLETED, \"error\": \"\"}, ) else: db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\":", "uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.FAILED,", "from apps.vehicle_counting.worker.offline_worker import ( OfflineVehicleTrackingApp, ) app = OfflineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height, frame_rate=frame_rate,", "args.cosmosdb_conn_str = os.environ[\"COSMOSDB_URL\"] args.cosmosdb_db_name = os.environ[\"COSMOSDB_NAME\"] try: db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name,", "rois=rois, lines=lines, tracking_classes=video_info.tracking_classes, visualize=False, collect_logs=args.debug, skip_frame=video_info.skip_frames, blobstore_conn_str=args.blobstore_conn_str, blobstore_container_name=args.blobstore_container, cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name, counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, )", "\"zone\": \"global\", }, {\"status\": RequestStatus.COMPLETED, \"error\": \"\"}, ) else: db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\":", ") else: db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.FAILED, \"error\":", "TRACKER_UUID = args.tracker_uuid cap = cv2.VideoCapture(video_info.video_url) frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) frame_rate", "int(p.x * 
frame_width), int(p.y * frame_height), ) ) rois.append(NxsRoi(points=points)) line = region.line lines.append(", "def main(): import argparse parser = argparse.ArgumentParser(description=\"Vehicle Counting App\") parser.add_argument(\"--video_uuid\", type=str) parser.add_argument(\"--nxs_url\", type=str)", "\"\"}, ) else: db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.FAILED,", ") db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.FAILED, \"error\": str(e)},", "args.video_uuid} )[0] ) if video_info.skip_frames is None: video_info.skip_frames = 3 if video_info.count_interval_secs is", "apps.vehicle_counting.app_types.app_request import ( InDbTrackingAppRequest, RequestStatus, ) from apps.vehicle_counting.worker.utils import * DB_TASKS_COLLECTION_NAME = \"tasks\"", "type=str) parser.add_argument(\"--nxs_api_key\", type=str) parser.add_argument( \"--object_detector_uuid\", type=str, default=\"bbff897256c9431eb19a2ad311749b39\", ) parser.add_argument( \"--tracker_uuid\", type=str, default=\"451ffc2ee1594fe2a6ace17fca5117ab\", )", ") video_info = InDbTrackingAppRequest( **db_client.query( DB_TASKS_COLLECTION_NAME, {\"video_uuid\": args.video_uuid} )[0] ) if video_info.skip_frames is", "video_info.regions: points = [] for p in region.roi.points: points.append( NxsPoint( int(p.x * frame_width),", "p0=NxsPoint( x=int(line.p0.x * frame_width), y=int(line.p0.y * frame_height), ), p1=NxsPoint( x=int(line.p1.x * frame_width), y=int(line.p1.y", "( OnlineVehicleTrackingApp, ) app = OnlineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height, frame_rate=frame_rate, nxs_infer_url=INFER_URL, nxs_api_key=args.nxs_api_key, detector_uuid=OBJECT_DETECTOR_UUID,", "Exception as e: print(e) db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, 
db_name=args.cosmosdb_db_name, ) db_client.update( DB_TASKS_COLLECTION_NAME,", "if video_info.count_interval_secs is None: video_info.count_interval_secs = 900 # 15 mins INFER_URL = f\"{args.nxs_url}/api/v2/tasks/tensors/infer\"", "is None: video_info.count_interval_secs = 900 # 15 mins INFER_URL = f\"{args.nxs_url}/api/v2/tasks/tensors/infer\" OBJECT_DETECTOR_UUID =", "lines.append( NxsLine( p0=NxsPoint( x=int(line.p0.x * frame_width), y=int(line.p0.y * frame_height), ), p1=NxsPoint( x=int(line.p1.x *", "main(): import argparse parser = argparse.ArgumentParser(description=\"Vehicle Counting App\") parser.add_argument(\"--video_uuid\", type=str) parser.add_argument(\"--nxs_url\", type=str) parser.add_argument(\"--nxs_api_key\",", "{\"status\": RequestStatus.RUNNING}, ) video_info = InDbTrackingAppRequest( **db_client.query( DB_TASKS_COLLECTION_NAME, {\"video_uuid\": args.video_uuid} )[0] ) if", "try: db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid,", "video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height, frame_rate=frame_rate, nxs_infer_url=INFER_URL, nxs_api_key=args.nxs_api_key, detector_uuid=OBJECT_DETECTOR_UUID, tracker_uuid=TRACKER_UUID, video_url=video_info.video_url, rois=rois, lines=lines, tracking_classes=video_info.tracking_classes, visualize=False,", "db_name=args.cosmosdb_db_name, ) db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.FAILED, \"error\":", "in region.roi.points: points.append( NxsPoint( int(p.x * frame_width), int(p.y * frame_height), ) ) rois.append(NxsRoi(points=points))", "InDbTrackingAppRequest( **db_client.query( DB_TASKS_COLLECTION_NAME, {\"video_uuid\": args.video_uuid} )[0] ) if video_info.skip_frames is None: video_info.skip_frames =", 
"counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) else: from apps.vehicle_counting.worker.offline_worker import ( OfflineVehicleTrackingApp, ) app = OfflineVehicleTrackingApp(", "counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) app.run_tracking() db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) if app.job_completed:", "video_info.video_url: from apps.vehicle_counting.worker.online_worker import ( OnlineVehicleTrackingApp, ) app = OnlineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height,", ") parser.add_argument(\"--blobstore_conn_str\", type=str) parser.add_argument(\"--blobstore_container\", type=str) parser.add_argument(\"--cosmosdb_conn_str\", type=str) parser.add_argument(\"--cosmosdb_db_name\", type=str) parser.add_argument( \"--debug\", default=False, type=lambda", "video_info = InDbTrackingAppRequest( **db_client.query( DB_TASKS_COLLECTION_NAME, {\"video_uuid\": args.video_uuid} )[0] ) if video_info.skip_frames is None:", "\"global\", }, {\"status\": RequestStatus.COMPLETED, \"error\": \"\"}, ) else: db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid,", "in video_info.video_url: from apps.vehicle_counting.worker.online_worker import ( OnlineVehicleTrackingApp, ) app = OnlineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width,", "default=\"bbff897256c9431eb19a2ad311749b39\", ) parser.add_argument( \"--tracker_uuid\", type=str, default=\"451ffc2ee1594fe2a6ace17fca5117ab\", ) parser.add_argument(\"--blobstore_conn_str\", type=str) parser.add_argument(\"--blobstore_container\", type=str) parser.add_argument(\"--cosmosdb_conn_str\", type=str)", "args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.COMPLETED, \"error\": \"\"}, ) else: db_client.update( 
DB_TASKS_COLLECTION_NAME, {", "args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.FAILED, \"error\": str(e)}, ) if __name__ == \"__main__\":", "Counting App\") parser.add_argument(\"--video_uuid\", type=str) parser.add_argument(\"--nxs_url\", type=str) parser.add_argument(\"--nxs_api_key\", type=str) parser.add_argument( \"--object_detector_uuid\", type=str, default=\"bbff897256c9431eb19a2ad311749b39\", )", "blobstore_conn_str=args.blobstore_conn_str, blobstore_container_name=args.blobstore_container, cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name, counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) app.run_tracking() db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str,", "cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name, counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) app.run_tracking() db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, )", "collect_logs=args.debug, skip_frame=video_info.skip_frames, blobstore_conn_str=args.blobstore_conn_str, blobstore_container_name=args.blobstore_container, cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name, counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) app.run_tracking() db_client = NxsDbFactory.create_db(", "{\"video_uuid\": args.video_uuid} )[0] ) if video_info.skip_frames is None: video_info.skip_frames = 3 if video_info.count_interval_secs", "\"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.FAILED, \"error\": str(e)}, ) if __name__ ==", "cv2 from nxs_libs.db import NxsDbFactory, NxsDbType from apps.vehicle_counting.app_types.app_request import ( InDbTrackingAppRequest, RequestStatus, )", "\"global\", }, {\"status\": 
RequestStatus.FAILED, \"error\": \"stream ended\"}, ) except Exception as e: print(e)", "* DB_TASKS_COLLECTION_NAME = \"tasks\" DB_COUNTS_COLLECTION_NAME = \"counts\" DB_LOGS_COLLECTION_NAME = \"logs\" STORAGE_LOGS_DIR_PATH = \"logs\"", "nxs_libs.db import NxsDbFactory, NxsDbType from apps.vehicle_counting.app_types.app_request import ( InDbTrackingAppRequest, RequestStatus, ) from apps.vehicle_counting.worker.utils", "in video_info.regions: points = [] for p in region.roi.points: points.append( NxsPoint( int(p.x *", ") parser.add_argument( \"--tracker_uuid\", type=str, default=\"451ffc2ee1594fe2a6ace17fca5117ab\", ) parser.add_argument(\"--blobstore_conn_str\", type=str) parser.add_argument(\"--blobstore_container\", type=str) parser.add_argument(\"--cosmosdb_conn_str\", type=str) parser.add_argument(\"--cosmosdb_db_name\",", "for region in video_info.regions: points = [] for p in region.roi.points: points.append( NxsPoint(", "db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.RUNNING}, ) video_info =", "parser.add_argument(\"--nxs_api_key\", type=str) parser.add_argument( \"--object_detector_uuid\", type=str, default=\"bbff897256c9431eb19a2ad311749b39\", ) parser.add_argument( \"--tracker_uuid\", type=str, default=\"451ffc2ee1594fe2a6ace17fca5117ab\", ) parser.add_argument(\"--blobstore_conn_str\",", "e: print(e) db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\":", "video_info.count_interval_secs = 900 # 15 mins INFER_URL = f\"{args.nxs_url}/api/v2/tasks/tensors/infer\" OBJECT_DETECTOR_UUID = args.object_detector_uuid TRACKER_UUID", "= \"logs\" STORAGE_LOGS_DIR_PATH = \"logs\" def main(): import argparse parser = argparse.ArgumentParser(description=\"Vehicle Counting", "except Exception as e: print(e) db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, 
uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) db_client.update(", "\"--tracker_uuid\", type=str, default=\"451ffc2ee1594fe2a6ace17fca5117ab\", ) parser.add_argument(\"--blobstore_conn_str\", type=str) parser.add_argument(\"--blobstore_container\", type=str) parser.add_argument(\"--cosmosdb_conn_str\", type=str) parser.add_argument(\"--cosmosdb_db_name\", type=str) parser.add_argument(", "frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) frame_rate = int(round(cap.get(cv2.CAP_PROP_FPS))) cap.release() rois = [] lines = []", "= region.line lines.append( NxsLine( p0=NxsPoint( x=int(line.p0.x * frame_width), y=int(line.p0.y * frame_height), ), p1=NxsPoint(", "= os.environ[\"VIDEO_UUID\"] args.nxs_url = os.environ[\"NXS_URL\"] args.nxs_api_key = os.environ[\"NXS_API_KEY\"] args.blobstore_conn_str = os.environ[\"BLOBSTORE_CONN_STR\"] args.blobstore_container =", "if \".m3u8\" not in video_info.video_url: from apps.vehicle_counting.worker.online_worker import ( OnlineVehicleTrackingApp, ) app =", "tracking_classes=video_info.tracking_classes, visualize=False, collect_logs=args.debug, skip_frame=video_info.skip_frames, blobstore_conn_str=args.blobstore_conn_str, blobstore_container_name=args.blobstore_container, cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name, counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) else: from", "collect_logs=args.debug, skip_frame=video_info.skip_frames, blobstore_conn_str=args.blobstore_conn_str, blobstore_container_name=args.blobstore_container, cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name, counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) else: from apps.vehicle_counting.worker.offline_worker import", "NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) db_client.update( DB_TASKS_COLLECTION_NAME, { 
\"video_uuid\": args.video_uuid, \"zone\": \"global\", },", "cosmosdb_db_name=args.cosmosdb_db_name, counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) else: from apps.vehicle_counting.worker.offline_worker import ( OfflineVehicleTrackingApp, ) app =", ") if video_info.skip_frames is None: video_info.skip_frames = 3 if video_info.count_interval_secs is None: video_info.count_interval_secs", "= os.environ[\"NXS_API_KEY\"] args.blobstore_conn_str = os.environ[\"BLOBSTORE_CONN_STR\"] args.blobstore_container = os.environ[\"BLOBSTORE_CONTAINER\"] args.cosmosdb_conn_str = os.environ[\"COSMOSDB_URL\"] args.cosmosdb_db_name =", "= cap.get(cv2.CAP_PROP_FRAME_WIDTH) frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) frame_rate = int(round(cap.get(cv2.CAP_PROP_FPS))) cap.release() rois = [] lines", "db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\":", "parser.add_argument( \"--tracker_uuid\", type=str, default=\"451ffc2ee1594fe2a6ace17fca5117ab\", ) parser.add_argument(\"--blobstore_conn_str\", type=str) parser.add_argument(\"--blobstore_container\", type=str) parser.add_argument(\"--cosmosdb_conn_str\", type=str) parser.add_argument(\"--cosmosdb_db_name\", type=str)", "default=\"451ffc2ee1594fe2a6ace17fca5117ab\", ) parser.add_argument(\"--blobstore_conn_str\", type=str) parser.add_argument(\"--blobstore_container\", type=str) parser.add_argument(\"--cosmosdb_conn_str\", type=str) parser.add_argument(\"--cosmosdb_db_name\", type=str) parser.add_argument( \"--debug\", default=False,", "blobstore_container_name=args.blobstore_container, cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name, counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) app.run_tracking() db_client = NxsDbFactory.create_db( 
NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name,", "type=str, default=\"bbff897256c9431eb19a2ad311749b39\", ) parser.add_argument( \"--tracker_uuid\", type=str, default=\"451ffc2ee1594fe2a6ace17fca5117ab\", ) parser.add_argument(\"--blobstore_conn_str\", type=str) parser.add_argument(\"--blobstore_container\", type=str) parser.add_argument(\"--cosmosdb_conn_str\",", "import ( InDbTrackingAppRequest, RequestStatus, ) from apps.vehicle_counting.worker.utils import * DB_TASKS_COLLECTION_NAME = \"tasks\" DB_COUNTS_COLLECTION_NAME", "apps.vehicle_counting.worker.utils import * DB_TASKS_COLLECTION_NAME = \"tasks\" DB_COUNTS_COLLECTION_NAME = \"counts\" DB_LOGS_COLLECTION_NAME = \"logs\" STORAGE_LOGS_DIR_PATH", "os.environ[\"COSMOSDB_NAME\"] try: db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\":", "frame_height=frame_height, frame_rate=frame_rate, nxs_infer_url=INFER_URL, nxs_api_key=args.nxs_api_key, detector_uuid=OBJECT_DETECTOR_UUID, tracker_uuid=TRACKER_UUID, video_url=video_info.video_url, rois=rois, lines=lines, tracking_classes=video_info.tracking_classes, visualize=False, collect_logs=args.debug, skip_frame=video_info.skip_frames,", "print(e) db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid,", "visualize=False, collect_logs=args.debug, skip_frame=video_info.skip_frames, blobstore_conn_str=args.blobstore_conn_str, blobstore_container_name=args.blobstore_container, cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name, counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) app.run_tracking() db_client =", "import * DB_TASKS_COLLECTION_NAME = \"tasks\" DB_COUNTS_COLLECTION_NAME = \"counts\" 
DB_LOGS_COLLECTION_NAME = \"logs\" STORAGE_LOGS_DIR_PATH =", "= f\"{args.nxs_url}/api/v2/tasks/tensors/infer\" OBJECT_DETECTOR_UUID = args.object_detector_uuid TRACKER_UUID = args.tracker_uuid cap = cv2.VideoCapture(video_info.video_url) frame_width =", "frame_height), ), ) ) if \".m3u8\" not in video_info.video_url: from apps.vehicle_counting.worker.online_worker import (", "= OfflineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height, frame_rate=frame_rate, nxs_infer_url=INFER_URL, nxs_api_key=args.nxs_api_key, detector_uuid=OBJECT_DETECTOR_UUID, tracker_uuid=TRACKER_UUID, video_url=video_info.video_url, rois=rois, lines=lines,", "= InDbTrackingAppRequest( **db_client.query( DB_TASKS_COLLECTION_NAME, {\"video_uuid\": args.video_uuid} )[0] ) if video_info.skip_frames is None: video_info.skip_frames", "parser.add_argument(\"--blobstore_container\", type=str) parser.add_argument(\"--cosmosdb_conn_str\", type=str) parser.add_argument(\"--cosmosdb_db_name\", type=str) parser.add_argument( \"--debug\", default=False, type=lambda x: (str(x).lower() ==", "line = region.line lines.append( NxsLine( p0=NxsPoint( x=int(line.p0.x * frame_width), y=int(line.p0.y * frame_height), ),", "DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.RUNNING}, ) video_info = InDbTrackingAppRequest(", "= argparse.ArgumentParser(description=\"Vehicle Counting App\") parser.add_argument(\"--video_uuid\", type=str) parser.add_argument(\"--nxs_url\", type=str) parser.add_argument(\"--nxs_api_key\", type=str) parser.add_argument( \"--object_detector_uuid\", type=str,", "* frame_width), y=int(line.p1.y * frame_height), ), ) ) if \".m3u8\" not in video_info.video_url:", "= [] lines = [] for region in video_info.regions: points = [] for", "mins INFER_URL = f\"{args.nxs_url}/api/v2/tasks/tensors/infer\" OBJECT_DETECTOR_UUID = args.object_detector_uuid TRACKER_UUID = args.tracker_uuid cap = 
cv2.VideoCapture(video_info.video_url)", "{ \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.FAILED, \"error\": \"stream ended\"}, ) except", "= 900 # 15 mins INFER_URL = f\"{args.nxs_url}/api/v2/tasks/tensors/infer\" OBJECT_DETECTOR_UUID = args.object_detector_uuid TRACKER_UUID =", "from apps.vehicle_counting.worker.utils import * DB_TASKS_COLLECTION_NAME = \"tasks\" DB_COUNTS_COLLECTION_NAME = \"counts\" DB_LOGS_COLLECTION_NAME = \"logs\"", "frame_height), ), p1=NxsPoint( x=int(line.p1.x * frame_width), y=int(line.p1.y * frame_height), ), ) ) if", "= \"tasks\" DB_COUNTS_COLLECTION_NAME = \"counts\" DB_LOGS_COLLECTION_NAME = \"logs\" STORAGE_LOGS_DIR_PATH = \"logs\" def main():", "db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.FAILED, \"error\": str(e)}, )", "y=int(line.p0.y * frame_height), ), p1=NxsPoint( x=int(line.p1.x * frame_width), y=int(line.p1.y * frame_height), ), )", "= NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\",", "parser.add_argument( \"--object_detector_uuid\", type=str, default=\"bbff897256c9431eb19a2ad311749b39\", ) parser.add_argument( \"--tracker_uuid\", type=str, default=\"451ffc2ee1594fe2a6ace17fca5117ab\", ) parser.add_argument(\"--blobstore_conn_str\", type=str) parser.add_argument(\"--blobstore_container\",", "from nxs_libs.db import NxsDbFactory, NxsDbType from apps.vehicle_counting.app_types.app_request import ( InDbTrackingAppRequest, RequestStatus, ) from", ") rois.append(NxsRoi(points=points)) line = region.line lines.append( NxsLine( p0=NxsPoint( x=int(line.p0.x * frame_width), y=int(line.p0.y *", "os.environ[\"VIDEO_UUID\"] args.nxs_url = os.environ[\"NXS_URL\"] args.nxs_api_key = os.environ[\"NXS_API_KEY\"] args.blobstore_conn_str = 
os.environ[\"BLOBSTORE_CONN_STR\"] args.blobstore_container = os.environ[\"BLOBSTORE_CONTAINER\"]", "parser.add_argument(\"--cosmosdb_conn_str\", type=str) parser.add_argument(\"--cosmosdb_db_name\", type=str) parser.add_argument( \"--debug\", default=False, type=lambda x: (str(x).lower() == \"true\") )", "None: video_info.skip_frames = 3 if video_info.count_interval_secs is None: video_info.count_interval_secs = 900 # 15", "= OnlineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height, frame_rate=frame_rate, nxs_infer_url=INFER_URL, nxs_api_key=args.nxs_api_key, detector_uuid=OBJECT_DETECTOR_UUID, tracker_uuid=TRACKER_UUID, video_url=video_info.video_url, rois=rois, lines=lines,", "parser.add_argument(\"--video_uuid\", type=str) parser.add_argument(\"--nxs_url\", type=str) parser.add_argument(\"--nxs_api_key\", type=str) parser.add_argument( \"--object_detector_uuid\", type=str, default=\"bbff897256c9431eb19a2ad311749b39\", ) parser.add_argument( \"--tracker_uuid\",", "os.environ[\"NXS_API_KEY\"] args.blobstore_conn_str = os.environ[\"BLOBSTORE_CONN_STR\"] args.blobstore_container = os.environ[\"BLOBSTORE_CONTAINER\"] args.cosmosdb_conn_str = os.environ[\"COSMOSDB_URL\"] args.cosmosdb_db_name = os.environ[\"COSMOSDB_NAME\"]", ") app = OnlineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height, frame_rate=frame_rate, nxs_infer_url=INFER_URL, nxs_api_key=args.nxs_api_key, detector_uuid=OBJECT_DETECTOR_UUID, tracker_uuid=TRACKER_UUID, video_url=video_info.video_url,", "OBJECT_DETECTOR_UUID = args.object_detector_uuid TRACKER_UUID = args.tracker_uuid cap = cv2.VideoCapture(video_info.video_url) frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) frame_height", "points = [] for p in region.roi.points: points.append( NxsPoint( int(p.x * frame_width), int(p.y", "NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) if 
app.job_completed: db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\":", "frame_height), ) ) rois.append(NxsRoi(points=points)) line = region.line lines.append( NxsLine( p0=NxsPoint( x=int(line.p0.x * frame_width),", ") from apps.vehicle_counting.worker.utils import * DB_TASKS_COLLECTION_NAME = \"tasks\" DB_COUNTS_COLLECTION_NAME = \"counts\" DB_LOGS_COLLECTION_NAME =", "[] for p in region.roi.points: points.append( NxsPoint( int(p.x * frame_width), int(p.y * frame_height),", "else: from apps.vehicle_counting.worker.offline_worker import ( OfflineVehicleTrackingApp, ) app = OfflineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height,", "else: db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.FAILED, \"error\": \"stream", "= \"logs\" def main(): import argparse parser = argparse.ArgumentParser(description=\"Vehicle Counting App\") parser.add_argument(\"--video_uuid\", type=str)", "* frame_height), ) ) rois.append(NxsRoi(points=points)) line = region.line lines.append( NxsLine( p0=NxsPoint( x=int(line.p0.x *", "argparse parser = argparse.ArgumentParser(description=\"Vehicle Counting App\") parser.add_argument(\"--video_uuid\", type=str) parser.add_argument(\"--nxs_url\", type=str) parser.add_argument(\"--nxs_api_key\", type=str) parser.add_argument(", "region.roi.points: points.append( NxsPoint( int(p.x * frame_width), int(p.y * frame_height), ) ) rois.append(NxsRoi(points=points)) line", "tracker_uuid=TRACKER_UUID, video_url=video_info.video_url, rois=rois, lines=lines, tracking_classes=video_info.tracking_classes, visualize=False, collect_logs=args.debug, skip_frame=video_info.skip_frames, blobstore_conn_str=args.blobstore_conn_str, blobstore_container_name=args.blobstore_container, cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name, 
counting_report_interval_secs=video_info.count_interval_secs,", "* frame_height), ), p1=NxsPoint( x=int(line.p1.x * frame_width), y=int(line.p1.y * frame_height), ), ) )", "NxsLine( p0=NxsPoint( x=int(line.p0.x * frame_width), y=int(line.p0.y * frame_height), ), p1=NxsPoint( x=int(line.p1.x * frame_width),", "= NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) if app.job_completed: db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid,", "video_info.skip_frames = 3 if video_info.count_interval_secs is None: video_info.count_interval_secs = 900 # 15 mins", "NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\":", "tracking_classes=video_info.tracking_classes, visualize=False, collect_logs=args.debug, skip_frame=video_info.skip_frames, blobstore_conn_str=args.blobstore_conn_str, blobstore_container_name=args.blobstore_container, cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name, counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) app.run_tracking() db_client", "\"logs\" def main(): import argparse parser = argparse.ArgumentParser(description=\"Vehicle Counting App\") parser.add_argument(\"--video_uuid\", type=str) parser.add_argument(\"--nxs_url\",", "3 if video_info.count_interval_secs is None: video_info.count_interval_secs = 900 # 15 mins INFER_URL =", "os.environ[\"NXS_URL\"] args.nxs_api_key = os.environ[\"NXS_API_KEY\"] args.blobstore_conn_str = os.environ[\"BLOBSTORE_CONN_STR\"] args.blobstore_container = os.environ[\"BLOBSTORE_CONTAINER\"] args.cosmosdb_conn_str = os.environ[\"COSMOSDB_URL\"]", "region.line lines.append( NxsLine( p0=NxsPoint( x=int(line.p0.x * frame_width), y=int(line.p0.y * frame_height), ), p1=NxsPoint( x=int(line.p1.x", "parser = 
argparse.ArgumentParser(description=\"Vehicle Counting App\") parser.add_argument(\"--video_uuid\", type=str) parser.add_argument(\"--nxs_url\", type=str) parser.add_argument(\"--nxs_api_key\", type=str) parser.add_argument( \"--object_detector_uuid\",", "x=int(line.p1.x * frame_width), y=int(line.p1.y * frame_height), ), ) ) if \".m3u8\" not in", "= 3 if video_info.count_interval_secs is None: video_info.count_interval_secs = 900 # 15 mins INFER_URL", "import argparse parser = argparse.ArgumentParser(description=\"Vehicle Counting App\") parser.add_argument(\"--video_uuid\", type=str) parser.add_argument(\"--nxs_url\", type=str) parser.add_argument(\"--nxs_api_key\", type=str)", "import cv2 from nxs_libs.db import NxsDbFactory, NxsDbType from apps.vehicle_counting.app_types.app_request import ( InDbTrackingAppRequest, RequestStatus,", "args.cosmosdb_db_name = os.environ[\"COSMOSDB_NAME\"] try: db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) db_client.update( DB_TASKS_COLLECTION_NAME,", "INFER_URL = f\"{args.nxs_url}/api/v2/tasks/tensors/infer\" OBJECT_DETECTOR_UUID = args.object_detector_uuid TRACKER_UUID = args.tracker_uuid cap = cv2.VideoCapture(video_info.video_url) frame_width", "skip_frame=video_info.skip_frames, blobstore_conn_str=args.blobstore_conn_str, blobstore_container_name=args.blobstore_container, cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name, counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) else: from apps.vehicle_counting.worker.offline_worker import (", "OnlineVehicleTrackingApp, ) app = OnlineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height, frame_rate=frame_rate, nxs_infer_url=INFER_URL, nxs_api_key=args.nxs_api_key, detector_uuid=OBJECT_DETECTOR_UUID, tracker_uuid=TRACKER_UUID,", "nxs_api_key=args.nxs_api_key, 
detector_uuid=OBJECT_DETECTOR_UUID, tracker_uuid=TRACKER_UUID, video_url=video_info.video_url, rois=rois, lines=lines, tracking_classes=video_info.tracking_classes, visualize=False, collect_logs=args.debug, skip_frame=video_info.skip_frames, blobstore_conn_str=args.blobstore_conn_str, blobstore_container_name=args.blobstore_container, cosmosdb_conn_str=args.cosmosdb_conn_str,", "\"true\") ) args = parser.parse_args() args.video_uuid = os.environ[\"VIDEO_UUID\"] args.nxs_url = os.environ[\"NXS_URL\"] args.nxs_api_key =", "* frame_width), int(p.y * frame_height), ) ) rois.append(NxsRoi(points=points)) line = region.line lines.append( NxsLine(", "None: video_info.count_interval_secs = 900 # 15 mins INFER_URL = f\"{args.nxs_url}/api/v2/tasks/tensors/infer\" OBJECT_DETECTOR_UUID = args.object_detector_uuid", "DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.FAILED, \"error\": str(e)}, ) if", "# 15 mins INFER_URL = f\"{args.nxs_url}/api/v2/tasks/tensors/infer\" OBJECT_DETECTOR_UUID = args.object_detector_uuid TRACKER_UUID = args.tracker_uuid cap", "\"logs\" STORAGE_LOGS_DIR_PATH = \"logs\" def main(): import argparse parser = argparse.ArgumentParser(description=\"Vehicle Counting App\")", "app.run_tracking() db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) if app.job_completed: db_client.update( DB_TASKS_COLLECTION_NAME, {", ") app = OfflineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height, frame_rate=frame_rate, nxs_infer_url=INFER_URL, nxs_api_key=args.nxs_api_key, detector_uuid=OBJECT_DETECTOR_UUID, tracker_uuid=TRACKER_UUID, video_url=video_info.video_url,", "for p in region.roi.points: points.append( NxsPoint( int(p.x * frame_width), int(p.y * frame_height), )", "\"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.FAILED, \"error\": \"stream ended\"}, 
) except Exception", "\"error\": \"\"}, ) else: db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\":", "ended\"}, ) except Exception as e: print(e) db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name,", "args.tracker_uuid cap = cv2.VideoCapture(video_info.video_url) frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) frame_rate = int(round(cap.get(cv2.CAP_PROP_FPS)))", "== \"true\") ) args = parser.parse_args() args.video_uuid = os.environ[\"VIDEO_UUID\"] args.nxs_url = os.environ[\"NXS_URL\"] args.nxs_api_key", "detector_uuid=OBJECT_DETECTOR_UUID, tracker_uuid=TRACKER_UUID, video_url=video_info.video_url, rois=rois, lines=lines, tracking_classes=video_info.tracking_classes, visualize=False, collect_logs=args.debug, skip_frame=video_info.skip_frames, blobstore_conn_str=args.blobstore_conn_str, blobstore_container_name=args.blobstore_container, cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name,", "type=str) parser.add_argument( \"--debug\", default=False, type=lambda x: (str(x).lower() == \"true\") ) args = parser.parse_args()", ") args = parser.parse_args() args.video_uuid = os.environ[\"VIDEO_UUID\"] args.nxs_url = os.environ[\"NXS_URL\"] args.nxs_api_key = os.environ[\"NXS_API_KEY\"]", "frame_rate = int(round(cap.get(cv2.CAP_PROP_FPS))) cap.release() rois = [] lines = [] for region in", ") ) if \".m3u8\" not in video_info.video_url: from apps.vehicle_counting.worker.online_worker import ( OnlineVehicleTrackingApp, )", "parser.parse_args() args.video_uuid = os.environ[\"VIDEO_UUID\"] args.nxs_url = os.environ[\"NXS_URL\"] args.nxs_api_key = os.environ[\"NXS_API_KEY\"] args.blobstore_conn_str = os.environ[\"BLOBSTORE_CONN_STR\"]", "{\"status\": RequestStatus.COMPLETED, \"error\": \"\"}, ) else: db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": 
args.video_uuid, \"zone\": \"global\",", "args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.FAILED, \"error\": \"stream ended\"}, ) except Exception as", "RequestStatus.FAILED, \"error\": \"stream ended\"}, ) except Exception as e: print(e) db_client = NxsDbFactory.create_db(", "= \"counts\" DB_LOGS_COLLECTION_NAME = \"logs\" STORAGE_LOGS_DIR_PATH = \"logs\" def main(): import argparse parser", "NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) if app.job_completed: db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\",", "from apps.vehicle_counting.worker.online_worker import ( OnlineVehicleTrackingApp, ) app = OnlineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height, frame_rate=frame_rate,", "= args.object_detector_uuid TRACKER_UUID = args.tracker_uuid cap = cv2.VideoCapture(video_info.video_url) frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) frame_height =", "video_info.count_interval_secs is None: video_info.count_interval_secs = 900 # 15 mins INFER_URL = f\"{args.nxs_url}/api/v2/tasks/tensors/infer\" OBJECT_DETECTOR_UUID", "\"stream ended\"}, ) except Exception as e: print(e) db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str,", "int(round(cap.get(cv2.CAP_PROP_FPS))) cap.release() rois = [] lines = [] for region in video_info.regions: points", "import os import cv2 from nxs_libs.db import NxsDbFactory, NxsDbType from apps.vehicle_counting.app_types.app_request import (", "args.nxs_api_key = os.environ[\"NXS_API_KEY\"] args.blobstore_conn_str = os.environ[\"BLOBSTORE_CONN_STR\"] args.blobstore_container = os.environ[\"BLOBSTORE_CONTAINER\"] args.cosmosdb_conn_str = os.environ[\"COSMOSDB_URL\"] args.cosmosdb_db_name", "= [] for p in region.roi.points: points.append( NxsPoint( int(p.x * frame_width), int(p.y *", "}, {\"status\": RequestStatus.RUNNING}, ) video_info = 
InDbTrackingAppRequest( **db_client.query( DB_TASKS_COLLECTION_NAME, {\"video_uuid\": args.video_uuid} )[0] )", "\"--debug\", default=False, type=lambda x: (str(x).lower() == \"true\") ) args = parser.parse_args() args.video_uuid =", ") else: from apps.vehicle_counting.worker.offline_worker import ( OfflineVehicleTrackingApp, ) app = OfflineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width,", ") db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.RUNNING}, ) video_info", "apps.vehicle_counting.worker.offline_worker import ( OfflineVehicleTrackingApp, ) app = OfflineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height, frame_rate=frame_rate, nxs_infer_url=INFER_URL,", "900 # 15 mins INFER_URL = f\"{args.nxs_url}/api/v2/tasks/tensors/infer\" OBJECT_DETECTOR_UUID = args.object_detector_uuid TRACKER_UUID = args.tracker_uuid", "video_info.skip_frames is None: video_info.skip_frames = 3 if video_info.count_interval_secs is None: video_info.count_interval_secs = 900", "[] lines = [] for region in video_info.regions: points = [] for p", "\"global\", }, {\"status\": RequestStatus.RUNNING}, ) video_info = InDbTrackingAppRequest( **db_client.query( DB_TASKS_COLLECTION_NAME, {\"video_uuid\": args.video_uuid} )[0]", "\"zone\": \"global\", }, {\"status\": RequestStatus.RUNNING}, ) video_info = InDbTrackingAppRequest( **db_client.query( DB_TASKS_COLLECTION_NAME, {\"video_uuid\": args.video_uuid}", "( InDbTrackingAppRequest, RequestStatus, ) from apps.vehicle_counting.worker.utils import * DB_TASKS_COLLECTION_NAME = \"tasks\" DB_COUNTS_COLLECTION_NAME =", "RequestStatus.RUNNING}, ) video_info = InDbTrackingAppRequest( **db_client.query( DB_TASKS_COLLECTION_NAME, {\"video_uuid\": args.video_uuid} )[0] ) if video_info.skip_frames", "app = OnlineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width, 
frame_height=frame_height, frame_rate=frame_rate, nxs_infer_url=INFER_URL, nxs_api_key=args.nxs_api_key, detector_uuid=OBJECT_DETECTOR_UUID, tracker_uuid=TRACKER_UUID, video_url=video_info.video_url, rois=rois,", "\"counts\" DB_LOGS_COLLECTION_NAME = \"logs\" STORAGE_LOGS_DIR_PATH = \"logs\" def main(): import argparse parser =", "(str(x).lower() == \"true\") ) args = parser.parse_args() args.video_uuid = os.environ[\"VIDEO_UUID\"] args.nxs_url = os.environ[\"NXS_URL\"]", "DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.COMPLETED, \"error\": \"\"}, ) else:", "type=str) parser.add_argument(\"--blobstore_container\", type=str) parser.add_argument(\"--cosmosdb_conn_str\", type=str) parser.add_argument(\"--cosmosdb_db_name\", type=str) parser.add_argument( \"--debug\", default=False, type=lambda x: (str(x).lower()", "frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) frame_rate = int(round(cap.get(cv2.CAP_PROP_FPS))) cap.release() rois = []", "visualize=False, collect_logs=args.debug, skip_frame=video_info.skip_frames, blobstore_conn_str=args.blobstore_conn_str, blobstore_container_name=args.blobstore_container, cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name, counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) else: from apps.vehicle_counting.worker.offline_worker", "p1=NxsPoint( x=int(line.p1.x * frame_width), y=int(line.p1.y * frame_height), ), ) ) if \".m3u8\" not", "[] for region in video_info.regions: points = [] for p in region.roi.points: points.append(", "type=str) parser.add_argument( \"--object_detector_uuid\", type=str, default=\"bbff897256c9431eb19a2ad311749b39\", ) parser.add_argument( \"--tracker_uuid\", type=str, default=\"451ffc2ee1594fe2a6ace17fca5117ab\", ) parser.add_argument(\"--blobstore_conn_str\", type=str)", "app = OfflineVehicleTrackingApp( 
video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height, frame_rate=frame_rate, nxs_infer_url=INFER_URL, nxs_api_key=args.nxs_api_key, detector_uuid=OBJECT_DETECTOR_UUID, tracker_uuid=TRACKER_UUID, video_url=video_info.video_url, rois=rois,", "= [] for region in video_info.regions: points = [] for p in region.roi.points:", "blobstore_conn_str=args.blobstore_conn_str, blobstore_container_name=args.blobstore_container, cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name, counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) else: from apps.vehicle_counting.worker.offline_worker import ( OfflineVehicleTrackingApp,", "type=str) parser.add_argument(\"--cosmosdb_db_name\", type=str) parser.add_argument( \"--debug\", default=False, type=lambda x: (str(x).lower() == \"true\") ) args", "= os.environ[\"COSMOSDB_URL\"] args.cosmosdb_db_name = os.environ[\"COSMOSDB_NAME\"] try: db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, )", "), ) ) if \".m3u8\" not in video_info.video_url: from apps.vehicle_counting.worker.online_worker import ( OnlineVehicleTrackingApp,", "app.job_completed: db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.COMPLETED, \"error\": \"\"},", "( OfflineVehicleTrackingApp, ) app = OfflineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height, frame_rate=frame_rate, nxs_infer_url=INFER_URL, nxs_api_key=args.nxs_api_key, detector_uuid=OBJECT_DETECTOR_UUID,", "\"--object_detector_uuid\", type=str, default=\"bbff897256c9431eb19a2ad311749b39\", ) parser.add_argument( \"--tracker_uuid\", type=str, default=\"451ffc2ee1594fe2a6ace17fca5117ab\", ) parser.add_argument(\"--blobstore_conn_str\", type=str) parser.add_argument(\"--blobstore_container\", type=str)", "not in 
video_info.video_url: from apps.vehicle_counting.worker.online_worker import ( OnlineVehicleTrackingApp, ) app = OnlineVehicleTrackingApp( video_uuid=video_info.video_uuid,", "OfflineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height, frame_rate=frame_rate, nxs_infer_url=INFER_URL, nxs_api_key=args.nxs_api_key, detector_uuid=OBJECT_DETECTOR_UUID, tracker_uuid=TRACKER_UUID, video_url=video_info.video_url, rois=rois, lines=lines, tracking_classes=video_info.tracking_classes,", "os.environ[\"COSMOSDB_URL\"] args.cosmosdb_db_name = os.environ[\"COSMOSDB_NAME\"] try: db_client = NxsDbFactory.create_db( NxsDbType.MONGODB, uri=args.cosmosdb_conn_str, db_name=args.cosmosdb_db_name, ) db_client.update(", "import ( OnlineVehicleTrackingApp, ) app = OnlineVehicleTrackingApp( video_uuid=video_info.video_uuid, frame_width=frame_width, frame_height=frame_height, frame_rate=frame_rate, nxs_infer_url=INFER_URL, nxs_api_key=args.nxs_api_key,", "args.video_uuid = os.environ[\"VIDEO_UUID\"] args.nxs_url = os.environ[\"NXS_URL\"] args.nxs_api_key = os.environ[\"NXS_API_KEY\"] args.blobstore_conn_str = os.environ[\"BLOBSTORE_CONN_STR\"] args.blobstore_container", "x=int(line.p0.x * frame_width), y=int(line.p0.y * frame_height), ), p1=NxsPoint( x=int(line.p1.x * frame_width), y=int(line.p1.y *", "lines=lines, tracking_classes=video_info.tracking_classes, visualize=False, collect_logs=args.debug, skip_frame=video_info.skip_frames, blobstore_conn_str=args.blobstore_conn_str, blobstore_container_name=args.blobstore_container, cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name, counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) else:", "skip_frame=video_info.skip_frames, blobstore_conn_str=args.blobstore_conn_str, blobstore_container_name=args.blobstore_container, cosmosdb_conn_str=args.cosmosdb_conn_str, cosmosdb_db_name=args.cosmosdb_db_name, 
counting_report_interval_secs=video_info.count_interval_secs, job_duration=video_info.job_duration, ) app.run_tracking() db_client = NxsDbFactory.create_db( NxsDbType.MONGODB,", "x: (str(x).lower() == \"true\") ) args = parser.parse_args() args.video_uuid = os.environ[\"VIDEO_UUID\"] args.nxs_url =", "args = parser.parse_args() args.video_uuid = os.environ[\"VIDEO_UUID\"] args.nxs_url = os.environ[\"NXS_URL\"] args.nxs_api_key = os.environ[\"NXS_API_KEY\"] args.blobstore_conn_str", "int(p.y * frame_height), ) ) rois.append(NxsRoi(points=points)) line = region.line lines.append( NxsLine( p0=NxsPoint( x=int(line.p0.x", "job_duration=video_info.job_duration, ) else: from apps.vehicle_counting.worker.offline_worker import ( OfflineVehicleTrackingApp, ) app = OfflineVehicleTrackingApp( video_uuid=video_info.video_uuid,", "RequestStatus, ) from apps.vehicle_counting.worker.utils import * DB_TASKS_COLLECTION_NAME = \"tasks\" DB_COUNTS_COLLECTION_NAME = \"counts\" DB_LOGS_COLLECTION_NAME", "if video_info.skip_frames is None: video_info.skip_frames = 3 if video_info.count_interval_secs is None: video_info.count_interval_secs =", "type=str) parser.add_argument(\"--cosmosdb_conn_str\", type=str) parser.add_argument(\"--cosmosdb_db_name\", type=str) parser.add_argument( \"--debug\", default=False, type=lambda x: (str(x).lower() == \"true\")", "DB_TASKS_COLLECTION_NAME, {\"video_uuid\": args.video_uuid} )[0] ) if video_info.skip_frames is None: video_info.skip_frames = 3 if", "= os.environ[\"BLOBSTORE_CONTAINER\"] args.cosmosdb_conn_str = os.environ[\"COSMOSDB_URL\"] args.cosmosdb_db_name = os.environ[\"COSMOSDB_NAME\"] try: db_client = NxsDbFactory.create_db( NxsDbType.MONGODB,", "parser.add_argument(\"--blobstore_conn_str\", type=str) parser.add_argument(\"--blobstore_container\", type=str) parser.add_argument(\"--cosmosdb_conn_str\", type=str) parser.add_argument(\"--cosmosdb_db_name\", type=str) parser.add_argument( \"--debug\", default=False, type=lambda x:", "= 
parser.parse_args() args.video_uuid = os.environ[\"VIDEO_UUID\"] args.nxs_url = os.environ[\"NXS_URL\"] args.nxs_api_key = os.environ[\"NXS_API_KEY\"] args.blobstore_conn_str =", "db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\": RequestStatus.FAILED, \"error\": \"stream ended\"},", "type=lambda x: (str(x).lower() == \"true\") ) args = parser.parse_args() args.video_uuid = os.environ[\"VIDEO_UUID\"] args.nxs_url", "db_name=args.cosmosdb_db_name, ) if app.job_completed: db_client.update( DB_TASKS_COLLECTION_NAME, { \"video_uuid\": args.video_uuid, \"zone\": \"global\", }, {\"status\":" ]
[ "perturbations. Note this vary rarely makes any significant # differences method='model-scoring') # corss", "test_z): self._skater_model, self._skater_interpreter = _create_skater_stuff(md, test_x, test_z) def save_plot_feature_importance(self, file_path): fig, ax =", "skater.core.explanations import Interpretation from hassbrain_algorithm.benchmark.interpretation import ModelWrapper from hassbrain_algorithm.benchmark.interpretation import _boolean2str wrapped_model =", "and not numerical test_x = _boolean2str(test_x) # create interpretation interpreter = Interpretation(test_x, #class_names=class_names,", "feature_names=feature_names, model_type='classifier', unique_values=class_names, probability=False, examples=examples) interpreter.load_data(test_x, training_labels=test_z, feature_names=feature_names) # todo flag for deletion", "flag for deletion (3lines below) # if this can savely be deleted tmp", "test_z) def save_plot_feature_importance(self, file_path): fig, ax = self._skater_interpreter.feature_importance.plot_feature_importance( self._skater_model, ascending=True, ax=None, progressbar=False, #", "from skater.model import InMemoryModel from skater.core.explanations import Interpretation from hassbrain_algorithm.benchmark.interpretation import ModelWrapper from", "import _boolean2str wrapped_model = ModelWrapper(mdl) class_names = mdl.get_state_lbl_lst() feature_names = mdl.get_obs_lbl_lst() # this", "fig.savefig(file_path, dpi=fig.dpi) plt.close(fig) def _create_skater_stuff(mdl, test_x, test_z): from skater.model import InMemoryModel from skater.core.explanations", "below) # if this can savely be deleted tmp = interpreter.data_set.feature_info for key,", "unique_values=class_names, probability=False, examples=examples) interpreter.load_data(test_x, training_labels=test_z, feature_names=feature_names) # todo flag for deletion (3lines below)", "Figure, axes #scorer_type='f1') # type: Figure, axes import matplotlib.pyplot as plt plt.tight_layout() 
fig.savefig(file_path,", "InMemoryModel(wrapped_model.predict, #target_names=class_names, feature_names=feature_names, model_type='classifier', unique_values=class_names, probability=False, examples=examples) interpreter.load_data(test_x, training_labels=test_z, feature_names=feature_names) # todo flag", "def save_plot_feature_importance(self, file_path): fig, ax = self._skater_interpreter.feature_importance.plot_feature_importance( self._skater_model, ascending=True, ax=None, progressbar=False, # model-scoring:", "self._skater_interpreter.feature_importance.plot_feature_importance( self._skater_model, ascending=True, ax=None, progressbar=False, # model-scoring: difference in log_loss or MAE of", "# supports classifiers with or without probability scores examples = test_x[:10] skater_model =", "from hassbrain_algorithm.benchmark.interpretation import ModelWrapper from hassbrain_algorithm.benchmark.interpretation import _boolean2str wrapped_model = ModelWrapper(mdl) class_names =", "Note this vary rarely makes any significant # differences method='model-scoring') # corss entropy", "log_loss or MAE of training_labels # given perturbations. Note this vary rarely makes", "#class_names=class_names, feature_names=feature_names) # create model # supports classifiers with or without probability scores", "def __init__(self, md, test_x, test_z): self._skater_model, self._skater_interpreter = _create_skater_stuff(md, test_x, test_z) def save_plot_feature_importance(self,", "import ModelWrapper from hassbrain_algorithm.benchmark.interpretation import _boolean2str wrapped_model = ModelWrapper(mdl) class_names = mdl.get_state_lbl_lst() feature_names", "# given perturbations. 
Note this vary rarely makes any significant # differences method='model-scoring')", "interpretation interpreter = Interpretation(test_x, #class_names=class_names, feature_names=feature_names) # create model # supports classifiers with", "feature_names=feature_names) # create model # supports classifiers with or without probability scores examples", "def _create_skater_stuff(mdl, test_x, test_z): from skater.model import InMemoryModel from skater.core.explanations import Interpretation from", "= ModelWrapper(mdl) class_names = mdl.get_state_lbl_lst() feature_names = mdl.get_obs_lbl_lst() # this has to be", "skater_model = InMemoryModel(wrapped_model.predict, #target_names=class_names, feature_names=feature_names, model_type='classifier', unique_values=class_names, probability=False, examples=examples) interpreter.load_data(test_x, training_labels=test_z, feature_names=feature_names) #", "test_x = _boolean2str(test_x) # create interpretation interpreter = Interpretation(test_x, #class_names=class_names, feature_names=feature_names) # create", "method='model-scoring') # corss entropy or f1 ('f1', 'cross_entropy') #scorer_type='cross_entropy') # type: Figure, axes", "create model # supports classifiers with or without probability scores examples = test_x[:10]", "training_labels=test_z, feature_names=feature_names) # todo flag for deletion (3lines below) # if this can", "skater.model import InMemoryModel from skater.core.explanations import Interpretation from hassbrain_algorithm.benchmark.interpretation import ModelWrapper from hassbrain_algorithm.benchmark.interpretation", "ModelWrapper from hassbrain_algorithm.benchmark.interpretation import _boolean2str wrapped_model = ModelWrapper(mdl) class_names = mdl.get_state_lbl_lst() feature_names =", "# model-scoring: difference in log_loss or MAE of training_labels # given perturbations. 
Note", "= self._skater_interpreter.feature_importance.plot_feature_importance( self._skater_model, ascending=True, ax=None, progressbar=False, # model-scoring: difference in log_loss or MAE", "any significant # differences method='model-scoring') # corss entropy or f1 ('f1', 'cross_entropy') #scorer_type='cross_entropy')", "= Interpretation(test_x, #class_names=class_names, feature_names=feature_names) # create model # supports classifiers with or without", "ax=None, progressbar=False, # model-scoring: difference in log_loss or MAE of training_labels # given", "to recognize the values as categorical and not numerical test_x = _boolean2str(test_x) #", "order for skater to recognize the values as categorical and not numerical test_x", "class FeatureImportance(object): def __init__(self, md, test_x, test_z): self._skater_model, self._skater_interpreter = _create_skater_stuff(md, test_x, test_z)", "numerical test_x = _boolean2str(test_x) # create interpretation interpreter = Interpretation(test_x, #class_names=class_names, feature_names=feature_names) #", "be done in order for skater to recognize the values as categorical and", "makes any significant # differences method='model-scoring') # corss entropy or f1 ('f1', 'cross_entropy')", "be deleted tmp = interpreter.data_set.feature_info for key, val in tmp.items(): val['numeric'] = False", "significant # differences method='model-scoring') # corss entropy or f1 ('f1', 'cross_entropy') #scorer_type='cross_entropy') #", "import InMemoryModel from skater.core.explanations import Interpretation from hassbrain_algorithm.benchmark.interpretation import ModelWrapper from hassbrain_algorithm.benchmark.interpretation import", "dpi=fig.dpi) plt.close(fig) def _create_skater_stuff(mdl, test_x, test_z): from skater.model import InMemoryModel from skater.core.explanations import", "as plt plt.tight_layout() fig.savefig(file_path, dpi=fig.dpi) plt.close(fig) def _create_skater_stuff(mdl, test_x, test_z): from skater.model import", 
"ax = self._skater_interpreter.feature_importance.plot_feature_importance( self._skater_model, ascending=True, ax=None, progressbar=False, # model-scoring: difference in log_loss or", "# corss entropy or f1 ('f1', 'cross_entropy') #scorer_type='cross_entropy') # type: Figure, axes #scorer_type='f1')", "this vary rarely makes any significant # differences method='model-scoring') # corss entropy or", "self._skater_model, ascending=True, ax=None, progressbar=False, # model-scoring: difference in log_loss or MAE of training_labels", "# this has to be done in order for skater to recognize the", "# create interpretation interpreter = Interpretation(test_x, #class_names=class_names, feature_names=feature_names) # create model # supports", "supports classifiers with or without probability scores examples = test_x[:10] skater_model = InMemoryModel(wrapped_model.predict,", "mdl.get_state_lbl_lst() feature_names = mdl.get_obs_lbl_lst() # this has to be done in order for", "or MAE of training_labels # given perturbations. Note this vary rarely makes any", "mdl.get_obs_lbl_lst() # this has to be done in order for skater to recognize", "create interpretation interpreter = Interpretation(test_x, #class_names=class_names, feature_names=feature_names) # create model # supports classifiers", "progressbar=False, # model-scoring: difference in log_loss or MAE of training_labels # given perturbations.", "axes #scorer_type='f1') # type: Figure, axes import matplotlib.pyplot as plt plt.tight_layout() fig.savefig(file_path, dpi=fig.dpi)", "# type: Figure, axes #scorer_type='f1') # type: Figure, axes import matplotlib.pyplot as plt", "savely be deleted tmp = interpreter.data_set.feature_info for key, val in tmp.items(): val['numeric'] =", "in log_loss or MAE of training_labels # given perturbations. 
Note this vary rarely", "from hassbrain_algorithm.benchmark.interpretation import _boolean2str wrapped_model = ModelWrapper(mdl) class_names = mdl.get_state_lbl_lst() feature_names = mdl.get_obs_lbl_lst()", "ModelWrapper(mdl) class_names = mdl.get_state_lbl_lst() feature_names = mdl.get_obs_lbl_lst() # this has to be done", "can savely be deleted tmp = interpreter.data_set.feature_info for key, val in tmp.items(): val['numeric']", "ascending=True, ax=None, progressbar=False, # model-scoring: difference in log_loss or MAE of training_labels #", "# differences method='model-scoring') # corss entropy or f1 ('f1', 'cross_entropy') #scorer_type='cross_entropy') # type:", "plt.tight_layout() fig.savefig(file_path, dpi=fig.dpi) plt.close(fig) def _create_skater_stuff(mdl, test_x, test_z): from skater.model import InMemoryModel from", "model_type='classifier', unique_values=class_names, probability=False, examples=examples) interpreter.load_data(test_x, training_labels=test_z, feature_names=feature_names) # todo flag for deletion (3lines", "fig, ax = self._skater_interpreter.feature_importance.plot_feature_importance( self._skater_model, ascending=True, ax=None, progressbar=False, # model-scoring: difference in log_loss", "self._skater_interpreter = _create_skater_stuff(md, test_x, test_z) def save_plot_feature_importance(self, file_path): fig, ax = self._skater_interpreter.feature_importance.plot_feature_importance( self._skater_model,", "_boolean2str wrapped_model = ModelWrapper(mdl) class_names = mdl.get_state_lbl_lst() feature_names = mdl.get_obs_lbl_lst() # this has", "hassbrain_algorithm.benchmark.interpretation import ModelWrapper from hassbrain_algorithm.benchmark.interpretation import _boolean2str wrapped_model = ModelWrapper(mdl) class_names = mdl.get_state_lbl_lst()", "self._skater_model, self._skater_interpreter = _create_skater_stuff(md, test_x, test_z) def save_plot_feature_importance(self, file_path): fig, ax = 
self._skater_interpreter.feature_importance.plot_feature_importance(", "hassbrain_algorithm.benchmark.interpretation import _boolean2str wrapped_model = ModelWrapper(mdl) class_names = mdl.get_state_lbl_lst() feature_names = mdl.get_obs_lbl_lst() #", "= InMemoryModel(wrapped_model.predict, #target_names=class_names, feature_names=feature_names, model_type='classifier', unique_values=class_names, probability=False, examples=examples) interpreter.load_data(test_x, training_labels=test_z, feature_names=feature_names) # todo", "_create_skater_stuff(mdl, test_x, test_z): from skater.model import InMemoryModel from skater.core.explanations import Interpretation from hassbrain_algorithm.benchmark.interpretation", "save_plot_feature_importance(self, file_path): fig, ax = self._skater_interpreter.feature_importance.plot_feature_importance( self._skater_model, ascending=True, ax=None, progressbar=False, # model-scoring: difference", "training_labels # given perturbations. Note this vary rarely makes any significant # differences", "this has to be done in order for skater to recognize the values", "test_x, test_z) def save_plot_feature_importance(self, file_path): fig, ax = self._skater_interpreter.feature_importance.plot_feature_importance( self._skater_model, ascending=True, ax=None, progressbar=False,", "('f1', 'cross_entropy') #scorer_type='cross_entropy') # type: Figure, axes #scorer_type='f1') # type: Figure, axes import", "examples = test_x[:10] skater_model = InMemoryModel(wrapped_model.predict, #target_names=class_names, feature_names=feature_names, model_type='classifier', unique_values=class_names, probability=False, examples=examples) interpreter.load_data(test_x,", "in order for skater to recognize the values as categorical and not numerical", "test_z): from skater.model import InMemoryModel from skater.core.explanations import Interpretation from hassbrain_algorithm.benchmark.interpretation import ModelWrapper", "matplotlib.pyplot as plt plt.tight_layout() 
fig.savefig(file_path, dpi=fig.dpi) plt.close(fig) def _create_skater_stuff(mdl, test_x, test_z): from skater.model", "or f1 ('f1', 'cross_entropy') #scorer_type='cross_entropy') # type: Figure, axes #scorer_type='f1') # type: Figure,", "examples=examples) interpreter.load_data(test_x, training_labels=test_z, feature_names=feature_names) # todo flag for deletion (3lines below) # if", "test_x[:10] skater_model = InMemoryModel(wrapped_model.predict, #target_names=class_names, feature_names=feature_names, model_type='classifier', unique_values=class_names, probability=False, examples=examples) interpreter.load_data(test_x, training_labels=test_z, feature_names=feature_names)", "this can savely be deleted tmp = interpreter.data_set.feature_info for key, val in tmp.items():", "type: Figure, axes import matplotlib.pyplot as plt plt.tight_layout() fig.savefig(file_path, dpi=fig.dpi) plt.close(fig) def _create_skater_stuff(mdl,", "as categorical and not numerical test_x = _boolean2str(test_x) # create interpretation interpreter =", "Interpretation from hassbrain_algorithm.benchmark.interpretation import ModelWrapper from hassbrain_algorithm.benchmark.interpretation import _boolean2str wrapped_model = ModelWrapper(mdl) class_names", "differences method='model-scoring') # corss entropy or f1 ('f1', 'cross_entropy') #scorer_type='cross_entropy') # type: Figure,", "= _create_skater_stuff(md, test_x, test_z) def save_plot_feature_importance(self, file_path): fig, ax = self._skater_interpreter.feature_importance.plot_feature_importance( self._skater_model, ascending=True,", "InMemoryModel from skater.core.explanations import Interpretation from hassbrain_algorithm.benchmark.interpretation import ModelWrapper from hassbrain_algorithm.benchmark.interpretation import _boolean2str", "model # supports classifiers with or without probability scores examples = test_x[:10] skater_model", "plt.close(fig) def _create_skater_stuff(mdl, test_x, test_z): from skater.model import 
InMemoryModel from skater.core.explanations import Interpretation", "deleted tmp = interpreter.data_set.feature_info for key, val in tmp.items(): val['numeric'] = False return", "_create_skater_stuff(md, test_x, test_z) def save_plot_feature_importance(self, file_path): fig, ax = self._skater_interpreter.feature_importance.plot_feature_importance( self._skater_model, ascending=True, ax=None,", "Interpretation(test_x, #class_names=class_names, feature_names=feature_names) # create model # supports classifiers with or without probability", "md, test_x, test_z): self._skater_model, self._skater_interpreter = _create_skater_stuff(md, test_x, test_z) def save_plot_feature_importance(self, file_path): fig,", "= _boolean2str(test_x) # create interpretation interpreter = Interpretation(test_x, #class_names=class_names, feature_names=feature_names) # create model", "# create model # supports classifiers with or without probability scores examples =", "wrapped_model = ModelWrapper(mdl) class_names = mdl.get_state_lbl_lst() feature_names = mdl.get_obs_lbl_lst() # this has to", "__init__(self, md, test_x, test_z): self._skater_model, self._skater_interpreter = _create_skater_stuff(md, test_x, test_z) def save_plot_feature_importance(self, file_path):", "_boolean2str(test_x) # create interpretation interpreter = Interpretation(test_x, #class_names=class_names, feature_names=feature_names) # create model #", "= test_x[:10] skater_model = InMemoryModel(wrapped_model.predict, #target_names=class_names, feature_names=feature_names, model_type='classifier', unique_values=class_names, probability=False, examples=examples) interpreter.load_data(test_x, training_labels=test_z,", "MAE of training_labels # given perturbations. 
Note this vary rarely makes any significant", "to be done in order for skater to recognize the values as categorical", "vary rarely makes any significant # differences method='model-scoring') # corss entropy or f1", "test_x, test_z): self._skater_model, self._skater_interpreter = _create_skater_stuff(md, test_x, test_z) def save_plot_feature_importance(self, file_path): fig, ax", "interpreter.load_data(test_x, training_labels=test_z, feature_names=feature_names) # todo flag for deletion (3lines below) # if this", "# todo flag for deletion (3lines below) # if this can savely be", "#scorer_type='cross_entropy') # type: Figure, axes #scorer_type='f1') # type: Figure, axes import matplotlib.pyplot as", "without probability scores examples = test_x[:10] skater_model = InMemoryModel(wrapped_model.predict, #target_names=class_names, feature_names=feature_names, model_type='classifier', unique_values=class_names,", "model-scoring: difference in log_loss or MAE of training_labels # given perturbations. Note this", "for skater to recognize the values as categorical and not numerical test_x =", "with or without probability scores examples = test_x[:10] skater_model = InMemoryModel(wrapped_model.predict, #target_names=class_names, feature_names=feature_names,", "probability=False, examples=examples) interpreter.load_data(test_x, training_labels=test_z, feature_names=feature_names) # todo flag for deletion (3lines below) #", "difference in log_loss or MAE of training_labels # given perturbations. 
Note this vary", "classifiers with or without probability scores examples = test_x[:10] skater_model = InMemoryModel(wrapped_model.predict, #target_names=class_names,", "interpreter = Interpretation(test_x, #class_names=class_names, feature_names=feature_names) # create model # supports classifiers with or", "import Interpretation from hassbrain_algorithm.benchmark.interpretation import ModelWrapper from hassbrain_algorithm.benchmark.interpretation import _boolean2str wrapped_model = ModelWrapper(mdl)", "probability scores examples = test_x[:10] skater_model = InMemoryModel(wrapped_model.predict, #target_names=class_names, feature_names=feature_names, model_type='classifier', unique_values=class_names, probability=False,", "has to be done in order for skater to recognize the values as", "(3lines below) # if this can savely be deleted tmp = interpreter.data_set.feature_info for", "plt plt.tight_layout() fig.savefig(file_path, dpi=fig.dpi) plt.close(fig) def _create_skater_stuff(mdl, test_x, test_z): from skater.model import InMemoryModel", "= mdl.get_obs_lbl_lst() # this has to be done in order for skater to", "'cross_entropy') #scorer_type='cross_entropy') # type: Figure, axes #scorer_type='f1') # type: Figure, axes import matplotlib.pyplot", "entropy or f1 ('f1', 'cross_entropy') #scorer_type='cross_entropy') # type: Figure, axes #scorer_type='f1') # type:", "FeatureImportance(object): def __init__(self, md, test_x, test_z): self._skater_model, self._skater_interpreter = _create_skater_stuff(md, test_x, test_z) def", "= mdl.get_state_lbl_lst() feature_names = mdl.get_obs_lbl_lst() # this has to be done in order", "recognize the values as categorical and not numerical test_x = _boolean2str(test_x) # create", "axes import matplotlib.pyplot as plt plt.tight_layout() fig.savefig(file_path, dpi=fig.dpi) plt.close(fig) def _create_skater_stuff(mdl, test_x, test_z):", "scores examples = test_x[:10] skater_model = InMemoryModel(wrapped_model.predict, 
#target_names=class_names, feature_names=feature_names, model_type='classifier', unique_values=class_names, probability=False, examples=examples)", "#scorer_type='f1') # type: Figure, axes import matplotlib.pyplot as plt plt.tight_layout() fig.savefig(file_path, dpi=fig.dpi) plt.close(fig)", "# type: Figure, axes import matplotlib.pyplot as plt plt.tight_layout() fig.savefig(file_path, dpi=fig.dpi) plt.close(fig) def", "given perturbations. Note this vary rarely makes any significant # differences method='model-scoring') #", "the values as categorical and not numerical test_x = _boolean2str(test_x) # create interpretation", "or without probability scores examples = test_x[:10] skater_model = InMemoryModel(wrapped_model.predict, #target_names=class_names, feature_names=feature_names, model_type='classifier',", "#target_names=class_names, feature_names=feature_names, model_type='classifier', unique_values=class_names, probability=False, examples=examples) interpreter.load_data(test_x, training_labels=test_z, feature_names=feature_names) # todo flag for", "class_names = mdl.get_state_lbl_lst() feature_names = mdl.get_obs_lbl_lst() # this has to be done in", "test_x, test_z): from skater.model import InMemoryModel from skater.core.explanations import Interpretation from hassbrain_algorithm.benchmark.interpretation import", "for deletion (3lines below) # if this can savely be deleted tmp =", "Figure, axes import matplotlib.pyplot as plt plt.tight_layout() fig.savefig(file_path, dpi=fig.dpi) plt.close(fig) def _create_skater_stuff(mdl, test_x,", "f1 ('f1', 'cross_entropy') #scorer_type='cross_entropy') # type: Figure, axes #scorer_type='f1') # type: Figure, axes", "= interpreter.data_set.feature_info for key, val in tmp.items(): val['numeric'] = False return skater_model, interpreter", "todo flag for deletion (3lines below) # if this can savely be deleted", "if this can savely be deleted tmp = interpreter.data_set.feature_info for key, val in", "type: Figure, axes 
#scorer_type='f1') # type: Figure, axes import matplotlib.pyplot as plt plt.tight_layout()", "tmp = interpreter.data_set.feature_info for key, val in tmp.items(): val['numeric'] = False return skater_model,", "not numerical test_x = _boolean2str(test_x) # create interpretation interpreter = Interpretation(test_x, #class_names=class_names, feature_names=feature_names)", "from skater.core.explanations import Interpretation from hassbrain_algorithm.benchmark.interpretation import ModelWrapper from hassbrain_algorithm.benchmark.interpretation import _boolean2str wrapped_model", "of training_labels # given perturbations. Note this vary rarely makes any significant #", "rarely makes any significant # differences method='model-scoring') # corss entropy or f1 ('f1',", "feature_names=feature_names) # todo flag for deletion (3lines below) # if this can savely", "feature_names = mdl.get_obs_lbl_lst() # this has to be done in order for skater", "deletion (3lines below) # if this can savely be deleted tmp = interpreter.data_set.feature_info", "done in order for skater to recognize the values as categorical and not", "corss entropy or f1 ('f1', 'cross_entropy') #scorer_type='cross_entropy') # type: Figure, axes #scorer_type='f1') #", "categorical and not numerical test_x = _boolean2str(test_x) # create interpretation interpreter = Interpretation(test_x,", "file_path): fig, ax = self._skater_interpreter.feature_importance.plot_feature_importance( self._skater_model, ascending=True, ax=None, progressbar=False, # model-scoring: difference in", "skater to recognize the values as categorical and not numerical test_x = _boolean2str(test_x)", "import matplotlib.pyplot as plt plt.tight_layout() fig.savefig(file_path, dpi=fig.dpi) plt.close(fig) def _create_skater_stuff(mdl, test_x, test_z): from", "# if this can savely be deleted tmp = interpreter.data_set.feature_info for key, val", "values as categorical and not numerical test_x = _boolean2str(test_x) # create interpretation 
interpreter" ]
[ "\"\"\" search = tmdb.Search() response = search.movie(query=title) return search.results def search_tv(title): \"\"\" Connects", "= tmdb.Search() response = search.movie(query=title) return search.results def search_tv(title): \"\"\" Connects to API", "\"\"\" search = tmdb.Search() response = search.tv(query=title) return search.results def search_by_id(id, type): \"\"\"", "def search_tv(title): \"\"\" Connects to API to search for a specific tv show", "search.tv(query=title) return search.results def search_by_id(id, type): \"\"\" Connects to API to search for", "type == 'tv': result = tmdb.TV(id) else : result = tmdb.Movies(id) return result.info()", "search_by_id(id, type): \"\"\" Connects to API to search for a specific movie or", "Connects to API to search for a specific tv show by title. \"\"\"", "for a specific movie by title. \"\"\" search = tmdb.Search() response = search.movie(query=title)", "id. \"\"\" if type == 'tv': result = tmdb.TV(id) else : result =", "def search_movie(title): \"\"\" Connects to API to search for a specific movie by", "by title. \"\"\" search = tmdb.Search() response = search.movie(query=title) return search.results def search_tv(title):", "title. \"\"\" search = tmdb.Search() response = search.tv(query=title) return search.results def search_by_id(id, type):", "search for a specific movie or show by id. \"\"\" if type ==", "\"\"\" Connects to API to search for a specific tv show by title.", "or show by id. \"\"\" if type == 'tv': result = tmdb.TV(id) else", "search.results def search_tv(title): \"\"\" Connects to API to search for a specific tv", "by id. 
\"\"\" if type == 'tv': result = tmdb.TV(id) else : result", "response = search.tv(query=title) return search.results def search_by_id(id, type): \"\"\" Connects to API to", "tmdbsimple as tmdb def search_movie(title): \"\"\" Connects to API to search for a", "search = tmdb.Search() response = search.movie(query=title) return search.results def search_tv(title): \"\"\" Connects to", "title. \"\"\" search = tmdb.Search() response = search.movie(query=title) return search.results def search_tv(title): \"\"\"", "tmdb.Search() response = search.movie(query=title) return search.results def search_tv(title): \"\"\" Connects to API to", "show by title. \"\"\" search = tmdb.Search() response = search.tv(query=title) return search.results def", "return search.results def search_by_id(id, type): \"\"\" Connects to API to search for a", "\"\"\" Connects to API to search for a specific movie or show by", "tmdb.Search() response = search.tv(query=title) return search.results def search_by_id(id, type): \"\"\" Connects to API", "movie or show by id. \"\"\" if type == 'tv': result = tmdb.TV(id)", "a specific tv show by title. \"\"\" search = tmdb.Search() response = search.tv(query=title)", "search.movie(query=title) return search.results def search_tv(title): \"\"\" Connects to API to search for a", "<gh_stars>0 import tmdbsimple as tmdb def search_movie(title): \"\"\" Connects to API to search", "search = tmdb.Search() response = search.tv(query=title) return search.results def search_by_id(id, type): \"\"\" Connects", "return search.results def search_tv(title): \"\"\" Connects to API to search for a specific", "def search_by_id(id, type): \"\"\" Connects to API to search for a specific movie", "to search for a specific movie by title. \"\"\" search = tmdb.Search() response", "a specific movie by title. 
\"\"\" search = tmdb.Search() response = search.movie(query=title) return", "search.results def search_by_id(id, type): \"\"\" Connects to API to search for a specific", "for a specific movie or show by id. \"\"\" if type == 'tv':", "API to search for a specific tv show by title. \"\"\" search =", "tmdb def search_movie(title): \"\"\" Connects to API to search for a specific movie", "specific tv show by title. \"\"\" search = tmdb.Search() response = search.tv(query=title) return", "= search.movie(query=title) return search.results def search_tv(title): \"\"\" Connects to API to search for", "Connects to API to search for a specific movie or show by id.", "to API to search for a specific tv show by title. \"\"\" search", "to search for a specific tv show by title. \"\"\" search = tmdb.Search()", "specific movie by title. \"\"\" search = tmdb.Search() response = search.movie(query=title) return search.results", "to API to search for a specific movie or show by id. \"\"\"", "to API to search for a specific movie by title. \"\"\" search =", "\"\"\" Connects to API to search for a specific movie by title. \"\"\"", "= search.tv(query=title) return search.results def search_by_id(id, type): \"\"\" Connects to API to search", "API to search for a specific movie by title. \"\"\" search = tmdb.Search()", "search_movie(title): \"\"\" Connects to API to search for a specific movie by title.", "for a specific tv show by title. \"\"\" search = tmdb.Search() response =", "response = search.movie(query=title) return search.results def search_tv(title): \"\"\" Connects to API to search", "by title. \"\"\" search = tmdb.Search() response = search.tv(query=title) return search.results def search_by_id(id,", "specific movie or show by id. \"\"\" if type == 'tv': result =", "search_tv(title): \"\"\" Connects to API to search for a specific tv show by", "movie by title. 
\"\"\" search = tmdb.Search() response = search.movie(query=title) return search.results def", "Connects to API to search for a specific movie by title. \"\"\" search", "= tmdb.Search() response = search.tv(query=title) return search.results def search_by_id(id, type): \"\"\" Connects to", "search for a specific movie by title. \"\"\" search = tmdb.Search() response =", "API to search for a specific movie or show by id. \"\"\" if", "to search for a specific movie or show by id. \"\"\" if type", "a specific movie or show by id. \"\"\" if type == 'tv': result", "as tmdb def search_movie(title): \"\"\" Connects to API to search for a specific", "type): \"\"\" Connects to API to search for a specific movie or show", "tv show by title. \"\"\" search = tmdb.Search() response = search.tv(query=title) return search.results", "show by id. \"\"\" if type == 'tv': result = tmdb.TV(id) else :", "\"\"\" if type == 'tv': result = tmdb.TV(id) else : result = tmdb.Movies(id)", "search for a specific tv show by title. \"\"\" search = tmdb.Search() response", "if type == 'tv': result = tmdb.TV(id) else : result = tmdb.Movies(id) return", "import tmdbsimple as tmdb def search_movie(title): \"\"\" Connects to API to search for" ]
[ "packages to be created:\\n {}\".format(\"\\n \".join(packages))) # Get long description from README with", "open(requirements_file) as buffer: requirements = buffer.read().splitlines() requirements = list(set(requirements)) requirements_string = \"\\n \".join(requirements)", "import find_packages, setup # Read requirements files requirements_file = \"requirements.txt\" with open(requirements_file) as", "{requirements_file}:\\n {requirements_string}\") # Collect packages packages = find_packages(exclude=(\"tests\", \"experiments\")) print(\"Found the following packages", "as buffer: requirements = buffer.read().splitlines() requirements = list(set(requirements)) requirements_string = \"\\n \".join(requirements) print(f\"Found", "\"\\n \".join(requirements) print(f\"Found the following requirements to be installed from {requirements_file}:\\n {requirements_string}\") #", "from setuptools import find_packages, setup # Read requirements files requirements_file = \"requirements.txt\" with", "= \"\\n \".join(requirements) print(f\"Found the following requirements to be installed from {requirements_file}:\\n {requirements_string}\")", "setup( name=\"scholarscrape\", version=\"1.0.0\", packages=packages, python_requires=\">=3.10.0\", install_requires=requirements, setup_requires=[], ext_modules=[], url=\"https://github.com/JakobHavtorn/scholarscrape\", author=\"<NAME>\", description=\"Interfacing with Semanticscholar", "following packages to be created:\\n {}\".format(\"\\n \".join(packages))) # Get long description from README", "= readme.read() setup( name=\"scholarscrape\", version=\"1.0.0\", packages=packages, python_requires=\">=3.10.0\", install_requires=requirements, setup_requires=[], ext_modules=[], url=\"https://github.com/JakobHavtorn/scholarscrape\", author=\"<NAME>\", description=\"Interfacing", "be installed from {requirements_file}:\\n {requirements_string}\") # Collect packages packages = find_packages(exclude=(\"tests\", \"experiments\")) 
print(\"Found", "requirements to be installed from {requirements_file}:\\n {requirements_string}\") # Collect packages packages = find_packages(exclude=(\"tests\",", "Get long description from README with open(\"README.md\", \"r\") as readme: long_description = readme.read()", "print(\"Found the following packages to be created:\\n {}\".format(\"\\n \".join(packages))) # Get long description", "readme: long_description = readme.read() setup( name=\"scholarscrape\", version=\"1.0.0\", packages=packages, python_requires=\">=3.10.0\", install_requires=requirements, setup_requires=[], ext_modules=[], url=\"https://github.com/JakobHavtorn/scholarscrape\",", "\"requirements.txt\" with open(requirements_file) as buffer: requirements = buffer.read().splitlines() requirements = list(set(requirements)) requirements_string =", "from {requirements_file}:\\n {requirements_string}\") # Collect packages packages = find_packages(exclude=(\"tests\", \"experiments\")) print(\"Found the following", "installed from {requirements_file}:\\n {requirements_string}\") # Collect packages packages = find_packages(exclude=(\"tests\", \"experiments\")) print(\"Found the", "list(set(requirements)) requirements_string = \"\\n \".join(requirements) print(f\"Found the following requirements to be installed from", "# Read requirements files requirements_file = \"requirements.txt\" with open(requirements_file) as buffer: requirements =", "= list(set(requirements)) requirements_string = \"\\n \".join(requirements) print(f\"Found the following requirements to be installed", "requirements_string = \"\\n \".join(requirements) print(f\"Found the following requirements to be installed from {requirements_file}:\\n", "print(f\"Found the following requirements to be installed from {requirements_file}:\\n {requirements_string}\") # Collect packages", "to be created:\\n {}\".format(\"\\n \".join(packages))) # Get long description from README with open(\"README.md\",", "created:\\n {}\".format(\"\\n 
\".join(packages))) # Get long description from README with open(\"README.md\", \"r\") as", "readme.read() setup( name=\"scholarscrape\", version=\"1.0.0\", packages=packages, python_requires=\">=3.10.0\", install_requires=requirements, setup_requires=[], ext_modules=[], url=\"https://github.com/JakobHavtorn/scholarscrape\", author=\"<NAME>\", description=\"Interfacing with", "setuptools import find_packages, setup # Read requirements files requirements_file = \"requirements.txt\" with open(requirements_file)", "# Get long description from README with open(\"README.md\", \"r\") as readme: long_description =", "find_packages, setup # Read requirements files requirements_file = \"requirements.txt\" with open(requirements_file) as buffer:", "description from README with open(\"README.md\", \"r\") as readme: long_description = readme.read() setup( name=\"scholarscrape\",", "with open(\"README.md\", \"r\") as readme: long_description = readme.read() setup( name=\"scholarscrape\", version=\"1.0.0\", packages=packages, python_requires=\">=3.10.0\",", "setup_requires=[], ext_modules=[], url=\"https://github.com/JakobHavtorn/scholarscrape\", author=\"<NAME>\", description=\"Interfacing with Semanticscholar API for greatness\", long_description=long_description, long_description_content_type=\"text/markdown\", )", "\"r\") as readme: long_description = readme.read() setup( name=\"scholarscrape\", version=\"1.0.0\", packages=packages, python_requires=\">=3.10.0\", install_requires=requirements, setup_requires=[],", "buffer.read().splitlines() requirements = list(set(requirements)) requirements_string = \"\\n \".join(requirements) print(f\"Found the following requirements to", "with open(requirements_file) as buffer: requirements = buffer.read().splitlines() requirements = list(set(requirements)) requirements_string = \"\\n", "name=\"scholarscrape\", version=\"1.0.0\", packages=packages, python_requires=\">=3.10.0\", install_requires=requirements, setup_requires=[], 
ext_modules=[], url=\"https://github.com/JakobHavtorn/scholarscrape\", author=\"<NAME>\", description=\"Interfacing with Semanticscholar API", "the following requirements to be installed from {requirements_file}:\\n {requirements_string}\") # Collect packages packages", "= find_packages(exclude=(\"tests\", \"experiments\")) print(\"Found the following packages to be created:\\n {}\".format(\"\\n \".join(packages))) #", "long description from README with open(\"README.md\", \"r\") as readme: long_description = readme.read() setup(", "following requirements to be installed from {requirements_file}:\\n {requirements_string}\") # Collect packages packages =", "open(\"README.md\", \"r\") as readme: long_description = readme.read() setup( name=\"scholarscrape\", version=\"1.0.0\", packages=packages, python_requires=\">=3.10.0\", install_requires=requirements,", "to be installed from {requirements_file}:\\n {requirements_string}\") # Collect packages packages = find_packages(exclude=(\"tests\", \"experiments\"))", "as readme: long_description = readme.read() setup( name=\"scholarscrape\", version=\"1.0.0\", packages=packages, python_requires=\">=3.10.0\", install_requires=requirements, setup_requires=[], ext_modules=[],", "\".join(packages))) # Get long description from README with open(\"README.md\", \"r\") as readme: long_description", "buffer: requirements = buffer.read().splitlines() requirements = list(set(requirements)) requirements_string = \"\\n \".join(requirements) print(f\"Found the", "requirements_file = \"requirements.txt\" with open(requirements_file) as buffer: requirements = buffer.read().splitlines() requirements = list(set(requirements))", "Read requirements files requirements_file = \"requirements.txt\" with open(requirements_file) as buffer: requirements = buffer.read().splitlines()", "packages = find_packages(exclude=(\"tests\", \"experiments\")) print(\"Found the following packages to be created:\\n {}\".format(\"\\n \".join(packages)))", "files 
requirements_file = \"requirements.txt\" with open(requirements_file) as buffer: requirements = buffer.read().splitlines() requirements =", "find_packages(exclude=(\"tests\", \"experiments\")) print(\"Found the following packages to be created:\\n {}\".format(\"\\n \".join(packages))) # Get", "README with open(\"README.md\", \"r\") as readme: long_description = readme.read() setup( name=\"scholarscrape\", version=\"1.0.0\", packages=packages,", "long_description = readme.read() setup( name=\"scholarscrape\", version=\"1.0.0\", packages=packages, python_requires=\">=3.10.0\", install_requires=requirements, setup_requires=[], ext_modules=[], url=\"https://github.com/JakobHavtorn/scholarscrape\", author=\"<NAME>\",", "= \"requirements.txt\" with open(requirements_file) as buffer: requirements = buffer.read().splitlines() requirements = list(set(requirements)) requirements_string", "requirements = buffer.read().splitlines() requirements = list(set(requirements)) requirements_string = \"\\n \".join(requirements) print(f\"Found the following", "packages=packages, python_requires=\">=3.10.0\", install_requires=requirements, setup_requires=[], ext_modules=[], url=\"https://github.com/JakobHavtorn/scholarscrape\", author=\"<NAME>\", description=\"Interfacing with Semanticscholar API for greatness\",", "install_requires=requirements, setup_requires=[], ext_modules=[], url=\"https://github.com/JakobHavtorn/scholarscrape\", author=\"<NAME>\", description=\"Interfacing with Semanticscholar API for greatness\", long_description=long_description, long_description_content_type=\"text/markdown\",", "\".join(requirements) print(f\"Found the following requirements to be installed from {requirements_file}:\\n {requirements_string}\") # Collect", "# Collect packages packages = find_packages(exclude=(\"tests\", \"experiments\")) print(\"Found the following packages to be", "packages packages = find_packages(exclude=(\"tests\", \"experiments\")) print(\"Found the following packages to be 
created:\\n {}\".format(\"\\n", "be created:\\n {}\".format(\"\\n \".join(packages))) # Get long description from README with open(\"README.md\", \"r\")", "{}\".format(\"\\n \".join(packages))) # Get long description from README with open(\"README.md\", \"r\") as readme:", "version=\"1.0.0\", packages=packages, python_requires=\">=3.10.0\", install_requires=requirements, setup_requires=[], ext_modules=[], url=\"https://github.com/JakobHavtorn/scholarscrape\", author=\"<NAME>\", description=\"Interfacing with Semanticscholar API for", "setup # Read requirements files requirements_file = \"requirements.txt\" with open(requirements_file) as buffer: requirements", "<filename>setup.py from setuptools import find_packages, setup # Read requirements files requirements_file = \"requirements.txt\"", "= buffer.read().splitlines() requirements = list(set(requirements)) requirements_string = \"\\n \".join(requirements) print(f\"Found the following requirements", "requirements = list(set(requirements)) requirements_string = \"\\n \".join(requirements) print(f\"Found the following requirements to be", "Collect packages packages = find_packages(exclude=(\"tests\", \"experiments\")) print(\"Found the following packages to be created:\\n", "the following packages to be created:\\n {}\".format(\"\\n \".join(packages))) # Get long description from", "from README with open(\"README.md\", \"r\") as readme: long_description = readme.read() setup( name=\"scholarscrape\", version=\"1.0.0\",", "requirements files requirements_file = \"requirements.txt\" with open(requirements_file) as buffer: requirements = buffer.read().splitlines() requirements", "python_requires=\">=3.10.0\", install_requires=requirements, setup_requires=[], ext_modules=[], url=\"https://github.com/JakobHavtorn/scholarscrape\", author=\"<NAME>\", description=\"Interfacing with Semanticscholar API for greatness\", long_description=long_description,", "\"experiments\")) print(\"Found the following packages to be 
created:\\n {}\".format(\"\\n \".join(packages))) # Get long", "{requirements_string}\") # Collect packages packages = find_packages(exclude=(\"tests\", \"experiments\")) print(\"Found the following packages to" ]
[ "\"city\": address_1.city, \"companyName\": address_1.company_name, \"id\": address_1.id, \"street\": address_1.street, \"zip\": address_1.zip, }, { \"city\":", "data[\"companyName\"] assert address.street == data[\"street\"] assert address.zip == data[\"zip\"] def test_edit_someone_else_delivery_address(client_buyer, seller, buyer,", "== 404 def test_add_delivery_address(client_seller, seller, faker): data = { \"city\": faker.city(), \"companyName\": faker.company(),", "= client_buyer.put(f\"/delivery_addresses/{address.id}\", data) assert response.status_code == 404 def test_partially_edit_delivery_address(client_seller, seller, faker): address =", "address.city == data[\"city\"] assert address.company_name == data[\"companyName\"] assert address.street == data[\"street\"] assert address.zip", "address.refresh_from_db() assert address.city == data[\"city\"] assert address.company_name == data[\"companyName\"] assert address.street == data[\"street\"]", "baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) response = client_seller.get(f\"/delivery_addresses/{address.id}\") assert response.json() == { \"city\": address.city, \"companyName\": address.company_name,", "\"id\": address.id, \"street\": address.street, \"zip\": address.zip, } def test_get_someone_else_delivery_address(client_seller, buyer): address = baker.make_recipe(\"delivery_addresses.delivery_address\",", "deleted due to protected related entities.\", \"code\": \"protected_error\", } def test_delete_someone_else_delivery_address(client_seller, buyer, faker):", "from delivery_addresses.models import DeliveryAddress pytestmark = pytest.mark.django_db def test_get_own_delivery_addresses(client_seller, seller, buyer): baker.make_recipe(\"delivery_addresses.delivery_address\", user=buyer)", "= client_seller.delete(f\"/delivery_addresses/{address.id}\") assert response.status_code == 400 assert response.json() == { \"message\": \"Cannot be", "{ \"city\": faker.city(), \"companyName\": 
faker.company(), \"street\": faker.street_address(), \"zip\": faker.zipcode(), } response = client_buyer.put(f\"/delivery_addresses/{address.id}\",", "data) assert response.json()[key] == value def test_partially_edit_someone_else_delivery_address( client_buyer, seller, buyer, faker ): address", "= client_seller.delete(f\"/delivery_addresses/{address.id}\") with pytest.raises(DeliveryAddress.DoesNotExist): address.refresh_from_db() def test_delete_delivery_address_when_in_use(client_seller, seller, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller)", "cart = baker.make_recipe(\"carts.cart\", delivery_address=address, user=seller) response = client_seller.delete(f\"/delivery_addresses/{address.id}\") assert response.status_code == 400 assert", "user=seller) response = client_seller.delete(f\"/delivery_addresses/{address.id}\") assert response.status_code == 400 assert response.json() == { \"message\":", "faker.zipcode(), } for key, value in data.items(): response = client_seller.patch(f\"/delivery_addresses/{address.id}\", data) assert response.json()[key]", "{\"city\": faker.city()} ) assert response.status_code == 404 def test_delete_delivery_address(client_seller, seller, faker): address =", "address.company_name == data[\"companyName\"] assert address.street == data[\"street\"] assert address.zip == data[\"zip\"] def test_edit_someone_else_delivery_address(client_buyer,", "user=seller) response = client_seller.get(f\"/delivery_addresses/{address.id}\") assert response.json() == { \"city\": address.city, \"companyName\": address.company_name, \"id\":", "== data[\"city\"] assert address.company_name == data[\"companyName\"] assert address.street == data[\"street\"] assert address.zip ==", "faker ): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) response = client_buyer.patch( f\"/delivery_addresses/{address.id}\", {\"city\": faker.city()} )", "address = 
baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) cart = baker.make_recipe(\"carts.cart\", delivery_address=address, user=seller) response = client_seller.delete(f\"/delivery_addresses/{address.id}\") assert", "\"id\": address.id, \"companyName\": data[\"companyName\"], \"street\": data[\"street\"], \"zip\": data[\"zip\"], \"city\": data[\"city\"], } address.refresh_from_db() assert", "assert response.json() == { \"id\": DeliveryAddress.objects.first().id, \"companyName\": data[\"companyName\"], \"street\": data[\"street\"], \"zip\": data[\"zip\"], \"city\":", "delivery_address=address, user=seller) response = client_seller.delete(f\"/delivery_addresses/{address.id}\") assert response.status_code == 400 assert response.json() == {", "response = client_seller.patch(f\"/delivery_addresses/{address.id}\", data) assert response.json()[key] == value def test_partially_edit_someone_else_delivery_address( client_buyer, seller, buyer,", "address_1.street, \"zip\": address_1.zip, }, { \"city\": address_2.city, \"companyName\": address_2.company_name, \"id\": address_2.id, \"street\": address_2.street,", "address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) response = client_seller.get(f\"/delivery_addresses/{address.id}\") assert response.json() == { \"city\": address.city,", "seller, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) data = { \"city\": faker.city(), \"companyName\": faker.company(),", "address.id, \"companyName\": data[\"companyName\"], \"street\": data[\"street\"], \"zip\": data[\"zip\"], \"city\": data[\"city\"], } address.refresh_from_db() assert address.city", "404 def test_delete_delivery_address(client_seller, seller, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) response = client_seller.delete(f\"/delivery_addresses/{address.id}\") with", "\"city\": faker.city(), \"companyName\": faker.company(), \"street\": 
def test_delete_someone_else_delivery_address(client_seller, buyer, faker):
    """A seller must not be able to delete a buyer's delivery address.

    The endpoint responds 404 (the foreign address is invisible to the
    caller) and the row must still exist afterwards.
    """
    address = baker.make_recipe("delivery_addresses.delivery_address", user=buyer)

    response = client_seller.delete(f"/delivery_addresses/{address.id}")

    assert response.status_code == 404
    # Fix: `refresh_from_db()` always returns None, so the previous
    # `assert not address.refresh_from_db()` passed vacuously and proved
    # nothing. Refresh (raises DeliveryAddress.DoesNotExist if the row were
    # gone) and assert existence explicitly.
    address.refresh_from_db()
    assert DeliveryAddress.objects.filter(pk=address.pk).exists()
def test_get_own_delivery_addresses(client_seller, seller, buyer):
    """Listing returns only the authenticated seller's addresses."""
    # Another user's address that must not appear in the listing.
    baker.make_recipe("delivery_addresses.delivery_address", user=buyer)
    own_addresses = baker.make_recipe(
        "delivery_addresses.delivery_address", user=seller, _quantity=2
    )

    response = client_seller.get("/delivery_addresses")

    expected = [
        {
            "city": entry.city,
            "companyName": entry.company_name,
            "id": entry.id,
            "street": entry.street,
            "zip": entry.zip,
        }
        for entry in own_addresses
    ]
    assert response.json() == expected
\"zip\": address.zip, } def", "= baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) response = client_buyer.patch( f\"/delivery_addresses/{address.id}\", {\"city\": faker.city()} ) assert response.status_code ==", "{ \"city\": faker.city(), \"companyName\": faker.company(), \"street\": faker.street_address(), \"zip\": faker.zipcode(), } response = client_seller.post(\"/delivery_addresses\",", "} response = client_buyer.put(f\"/delivery_addresses/{address.id}\", data) assert response.status_code == 404 def test_partially_edit_delivery_address(client_seller, seller, faker):", "address_2.city, \"companyName\": address_2.company_name, \"id\": address_2.id, \"street\": address_2.street, \"zip\": address_2.zip, }, ] def test_get_own_delivery_address(client_seller,", "user=seller) response = client_seller.delete(f\"/delivery_addresses/{address.id}\") with pytest.raises(DeliveryAddress.DoesNotExist): address.refresh_from_db() def test_delete_delivery_address_when_in_use(client_seller, seller, faker): address =", "baker from delivery_addresses.models import DeliveryAddress pytestmark = pytest.mark.django_db def test_get_own_delivery_addresses(client_seller, seller, buyer): baker.make_recipe(\"delivery_addresses.delivery_address\",", "= baker.make_recipe(\"delivery_addresses.delivery_address\", user=buyer) response = client_seller.delete(f\"/delivery_addresses/{address.id}\") assert response.status_code == 404 assert not address.refresh_from_db()", "{ \"id\": DeliveryAddress.objects.first().id, \"companyName\": data[\"companyName\"], \"street\": data[\"street\"], \"zip\": data[\"zip\"], \"city\": data[\"city\"], } def", "baker.make_recipe(\"delivery_addresses.delivery_address\", user=buyer) address_1, address_2 = baker.make_recipe( \"delivery_addresses.delivery_address\", user=seller, _quantity=2 ) response = client_seller.get(\"/delivery_addresses\")", "data = { \"city\": faker.city(), \"companyName\": faker.company(), \"street\": 
def test_add_delivery_address(client_seller, seller, faker):
    """POSTing a new address echoes the payload back with the created id."""
    payload = {
        "city": faker.city(),
        "companyName": faker.company(),
        "street": faker.street_address(),
        "zip": faker.zipcode(),
    }

    response = client_seller.post("/delivery_addresses", payload)

    created = DeliveryAddress.objects.first()
    assert response.json() == {
        "id": created.id,
        "companyName": payload["companyName"],
        "street": payload["street"],
        "zip": payload["zip"],
        "city": payload["city"],
    }
def test_delete_delivery_address_when_in_use(client_seller, seller, faker):
    """Deleting an address referenced by a cart is rejected with 400."""
    address = baker.make_recipe("delivery_addresses.delivery_address", user=seller)
    # The cart only needs to exist to block the deletion (the API reports
    # "protected related entities"), so the unused `cart` binding from the
    # original is dropped.
    baker.make_recipe("carts.cart", delivery_address=address, user=seller)

    response = client_seller.delete(f"/delivery_addresses/{address.id}")

    assert response.status_code == 400
    assert response.json() == {
        "message": "Cannot be deleted due to protected related entities.",
        "code": "protected_error",
    }
def test_partially_edit_someone_else_delivery_address(
    client_buyer, seller, buyer, faker
):
    """A buyer must not be able to PATCH a seller's delivery address (404)."""
    foreign_address = baker.make_recipe(
        "delivery_addresses.delivery_address", user=seller
    )

    response = client_buyer.patch(
        f"/delivery_addresses/{foreign_address.id}", {"city": faker.city()}
    )

    assert response.status_code == 404
delivery_addresses.models import DeliveryAddress pytestmark", "response = client_seller.delete(f\"/delivery_addresses/{address.id}\") with pytest.raises(DeliveryAddress.DoesNotExist): address.refresh_from_db() def test_delete_delivery_address_when_in_use(client_seller, seller, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\",", "baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) data = { \"city\": faker.city(), \"companyName\": faker.company(), \"street\": faker.street_address(), \"zip\": faker.zipcode(),", "faker): data = { \"city\": faker.city(), \"companyName\": faker.company(), \"street\": faker.street_address(), \"zip\": faker.zipcode(), }", "address.city, \"companyName\": address.company_name, \"id\": address.id, \"street\": address.street, \"zip\": address.zip, } def test_get_someone_else_delivery_address(client_seller, buyer):", "response.status_code == 404 def test_delete_delivery_address(client_seller, seller, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) response =", "value in data.items(): response = client_seller.patch(f\"/delivery_addresses/{address.id}\", data) assert response.json()[key] == value def test_partially_edit_someone_else_delivery_address(", "assert address.city == data[\"city\"] assert address.company_name == data[\"companyName\"] assert address.street == data[\"street\"] assert", "data[\"companyName\"], \"street\": data[\"street\"], \"zip\": data[\"zip\"], \"city\": data[\"city\"], } address.refresh_from_db() assert address.city == data[\"city\"]", "address_2.id, \"street\": address_2.street, \"zip\": address_2.zip, }, ] def test_get_own_delivery_address(client_seller, seller): address = baker.make_recipe(\"delivery_addresses.delivery_address\",", "address_1.city, \"companyName\": address_1.company_name, \"id\": address_1.id, \"street\": address_1.street, \"zip\": address_1.zip, }, { \"city\": address_2.city,", "def 
def test_partially_edit_delivery_address(client_seller, seller, faker):
    """PATCHing one field at a time updates that field on an owned address."""
    address = baker.make_recipe("delivery_addresses.delivery_address", user=seller)
    data = {
        "city": faker.city(),
        "companyName": faker.company(),
        "street": faker.street_address(),
        "zip": faker.zipcode(),
    }
    for key, value in data.items():
        # Fix: send only the single field under test. The original passed the
        # whole `data` dict on every iteration, turning each PATCH into a full
        # update and never exercising partial editing at all.
        response = client_seller.patch(
            f"/delivery_addresses/{address.id}", {key: value}
        )
        assert response.json()[key] == value
def test_get_someone_else_delivery_address(client_seller, buyer):
    """A seller must not be able to read another user's address (404)."""
    foreign_address = baker.make_recipe(
        "delivery_addresses.delivery_address", user=buyer
    )

    response = client_seller.get(f"/delivery_addresses/{foreign_address.id}")

    assert response.status_code == 404
pytestmark = pytest.mark.django_db def", "def test_add_delivery_address(client_seller, seller, faker): data = { \"city\": faker.city(), \"companyName\": faker.company(), \"street\": faker.street_address(),", "client_seller.delete(f\"/delivery_addresses/{address.id}\") assert response.status_code == 400 assert response.json() == { \"message\": \"Cannot be deleted", "import pytest from model_bakery import baker from delivery_addresses.models import DeliveryAddress pytestmark = pytest.mark.django_db", "client_seller.get(\"/delivery_addresses\") assert response.json() == [ { \"city\": address_1.city, \"companyName\": address_1.company_name, \"id\": address_1.id, \"street\":", "\"street\": faker.street_address(), \"zip\": faker.zipcode(), } response = client_seller.put(f\"/delivery_addresses/{address.id}\", data) assert response.json() == {", "= client_seller.patch(f\"/delivery_addresses/{address.id}\", data) assert response.json()[key] == value def test_partially_edit_someone_else_delivery_address( client_buyer, seller, buyer, faker", "data[\"street\"] assert address.zip == data[\"zip\"] def test_edit_someone_else_delivery_address(client_buyer, seller, buyer, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\",", "\"delivery_addresses.delivery_address\", user=seller, _quantity=2 ) response = client_seller.get(\"/delivery_addresses\") assert response.json() == [ { \"city\":", "= baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) cart = baker.make_recipe(\"carts.cart\", delivery_address=address, user=seller) response = client_seller.delete(f\"/delivery_addresses/{address.id}\") assert response.status_code", "response.status_code == 404 def test_partially_edit_delivery_address(client_seller, seller, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) data =", "data[\"zip\"], \"city\": data[\"city\"], } address.refresh_from_db() assert address.city == data[\"city\"] assert 
def test_edit_delivery_address(client_seller, seller, faker):
    """PUT on an owned address updates it; response and DB match the payload."""
    address = baker.make_recipe("delivery_addresses.delivery_address", user=seller)
    payload = {
        "city": faker.city(),
        "companyName": faker.company(),
        "street": faker.street_address(),
        "zip": faker.zipcode(),
    }

    response = client_seller.put(f"/delivery_addresses/{address.id}", payload)

    assert response.json() == {
        "id": address.id,
        "companyName": payload["companyName"],
        "street": payload["street"],
        "zip": payload["zip"],
        "city": payload["city"],
    }

    # The change must be persisted, not only echoed in the response.
    address.refresh_from_db()
    assert address.city == payload["city"]
    assert address.company_name == payload["companyName"]
    assert address.street == payload["street"]
    assert address.zip == payload["zip"]
assert response.status_code == 404 def test_add_delivery_address(client_seller, seller, faker): data = {", "faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=buyer) response = client_seller.delete(f\"/delivery_addresses/{address.id}\") assert response.status_code == 404 assert", "assert address.zip == data[\"zip\"] def test_edit_someone_else_delivery_address(client_buyer, seller, buyer, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller)", "f\"/delivery_addresses/{address.id}\", {\"city\": faker.city()} ) assert response.status_code == 404 def test_delete_delivery_address(client_seller, seller, faker): address", "from model_bakery import baker from delivery_addresses.models import DeliveryAddress pytestmark = pytest.mark.django_db def test_get_own_delivery_addresses(client_seller,", "def test_get_own_delivery_address(client_seller, seller): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) response = client_seller.get(f\"/delivery_addresses/{address.id}\") assert response.json() ==", "faker.city(), \"companyName\": faker.company(), \"street\": faker.street_address(), \"zip\": faker.zipcode(), } response = client_seller.put(f\"/delivery_addresses/{address.id}\", data) assert", "test_get_someone_else_delivery_address(client_seller, buyer): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=buyer) response = client_seller.get(f\"/delivery_addresses/{address.id}\") assert response.status_code == 404", "= client_seller.put(f\"/delivery_addresses/{address.id}\", data) assert response.json() == { \"id\": address.id, \"companyName\": data[\"companyName\"], \"street\": data[\"street\"],", "response.json() == { \"message\": \"Cannot be deleted due to protected related entities.\", \"code\":", "faker.zipcode(), } response = client_seller.post(\"/delivery_addresses\", data) assert response.json() == { \"id\": DeliveryAddress.objects.first().id, 
\"companyName\":", "response.json() == [ { \"city\": address_1.city, \"companyName\": address_1.company_name, \"id\": address_1.id, \"street\": address_1.street, \"zip\":", "\"companyName\": data[\"companyName\"], \"street\": data[\"street\"], \"zip\": data[\"zip\"], \"city\": data[\"city\"], } address.refresh_from_db() assert address.city ==", "\"companyName\": faker.company(), \"street\": faker.street_address(), \"zip\": faker.zipcode(), } response = client_seller.post(\"/delivery_addresses\", data) assert response.json()", "== 404 def test_partially_edit_delivery_address(client_seller, seller, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) data = {", "faker.street_address(), \"zip\": faker.zipcode(), } response = client_buyer.put(f\"/delivery_addresses/{address.id}\", data) assert response.status_code == 404 def", "data[\"zip\"] def test_edit_someone_else_delivery_address(client_buyer, seller, buyer, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) data = {", "\"city\": address.city, \"companyName\": address.company_name, \"id\": address.id, \"street\": address.street, \"zip\": address.zip, } def test_get_someone_else_delivery_address(client_seller,", "data[\"city\"], } address.refresh_from_db() assert address.city == data[\"city\"] assert address.company_name == data[\"companyName\"] assert address.street", "{ \"city\": address_1.city, \"companyName\": address_1.company_name, \"id\": address_1.id, \"street\": address_1.street, \"zip\": address_1.zip, }, {", "== data[\"zip\"] def test_edit_someone_else_delivery_address(client_buyer, seller, buyer, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) data =", "address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) response = client_seller.delete(f\"/delivery_addresses/{address.id}\") with pytest.raises(DeliveryAddress.DoesNotExist): address.refresh_from_db() def 
test_delete_delivery_address_when_in_use(client_seller, seller,", "faker.zipcode(), } response = client_buyer.put(f\"/delivery_addresses/{address.id}\", data) assert response.status_code == 404 def test_partially_edit_delivery_address(client_seller, seller,", "def test_edit_delivery_address(client_seller, seller, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) data = { \"city\": faker.city(),", "\"city\": faker.city(), \"companyName\": faker.company(), \"street\": faker.street_address(), \"zip\": faker.zipcode(), } for key, value in", "\"street\": faker.street_address(), \"zip\": faker.zipcode(), } for key, value in data.items(): response = client_seller.patch(f\"/delivery_addresses/{address.id}\",", "for key, value in data.items(): response = client_seller.patch(f\"/delivery_addresses/{address.id}\", data) assert response.json()[key] == value", "def test_edit_someone_else_delivery_address(client_buyer, seller, buyer, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) data = { \"city\":", "client_seller.get(f\"/delivery_addresses/{address.id}\") assert response.json() == { \"city\": address.city, \"companyName\": address.company_name, \"id\": address.id, \"street\": address.street,", "= baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) data = { \"city\": faker.city(), \"companyName\": faker.company(), \"street\": faker.street_address(), \"zip\":", "address_1.id, \"street\": address_1.street, \"zip\": address_1.zip, }, { \"city\": address_2.city, \"companyName\": address_2.company_name, \"id\": address_2.id,", "\"zip\": faker.zipcode(), } response = client_seller.put(f\"/delivery_addresses/{address.id}\", data) assert response.json() == { \"id\": address.id,", "= client_seller.post(\"/delivery_addresses\", data) assert response.json() == { \"id\": DeliveryAddress.objects.first().id, \"companyName\": data[\"companyName\"], \"street\": data[\"street\"],", 
"baker.make_recipe( \"delivery_addresses.delivery_address\", user=seller, _quantity=2 ) response = client_seller.get(\"/delivery_addresses\") assert response.json() == [ {", "{ \"city\": address_2.city, \"companyName\": address_2.company_name, \"id\": address_2.id, \"street\": address_2.street, \"zip\": address_2.zip, }, ]", "faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) data = { \"city\": faker.city(), \"companyName\": faker.company(), \"street\":", "address.zip == data[\"zip\"] def test_edit_someone_else_delivery_address(client_buyer, seller, buyer, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) data", "test_partially_edit_delivery_address(client_seller, seller, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) data = { \"city\": faker.city(), \"companyName\":", "client_buyer, seller, buyer, faker ): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) response = client_buyer.patch( f\"/delivery_addresses/{address.id}\",", "{ \"id\": address.id, \"companyName\": data[\"companyName\"], \"street\": data[\"street\"], \"zip\": data[\"zip\"], \"city\": data[\"city\"], } address.refresh_from_db()", "response = client_seller.get(\"/delivery_addresses\") assert response.json() == [ { \"city\": address_1.city, \"companyName\": address_1.company_name, \"id\":", "400 assert response.json() == { \"message\": \"Cannot be deleted due to protected related", "faker.company(), \"street\": faker.street_address(), \"zip\": faker.zipcode(), } response = client_seller.post(\"/delivery_addresses\", data) assert response.json() ==", "seller): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) response = client_seller.get(f\"/delivery_addresses/{address.id}\") assert response.json() == { \"city\":", "baker.make_recipe(\"carts.cart\", delivery_address=address, user=seller) response = 
client_seller.delete(f\"/delivery_addresses/{address.id}\") assert response.status_code == 400 assert response.json() ==", "def test_get_someone_else_delivery_address(client_seller, buyer): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=buyer) response = client_seller.get(f\"/delivery_addresses/{address.id}\") assert response.status_code ==", "data[\"city\"] assert address.company_name == data[\"companyName\"] assert address.street == data[\"street\"] assert address.zip == data[\"zip\"]", "user=buyer) address_1, address_2 = baker.make_recipe( \"delivery_addresses.delivery_address\", user=seller, _quantity=2 ) response = client_seller.get(\"/delivery_addresses\") assert", "client_buyer.patch( f\"/delivery_addresses/{address.id}\", {\"city\": faker.city()} ) assert response.status_code == 404 def test_delete_delivery_address(client_seller, seller, faker):", "assert response.json()[key] == value def test_partially_edit_someone_else_delivery_address( client_buyer, seller, buyer, faker ): address =", "client_buyer.put(f\"/delivery_addresses/{address.id}\", data) assert response.status_code == 404 def test_partially_edit_delivery_address(client_seller, seller, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\",", "seller, buyer, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) data = { \"city\": faker.city(), \"companyName\":", "= baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) response = client_seller.delete(f\"/delivery_addresses/{address.id}\") with pytest.raises(DeliveryAddress.DoesNotExist): address.refresh_from_db() def test_delete_delivery_address_when_in_use(client_seller, seller, faker):", "\"protected_error\", } def test_delete_someone_else_delivery_address(client_seller, buyer, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=buyer) response = client_seller.delete(f\"/delivery_addresses/{address.id}\")", "def 
test_get_own_delivery_addresses(client_seller, seller, buyer): baker.make_recipe(\"delivery_addresses.delivery_address\", user=buyer) address_1, address_2 = baker.make_recipe( \"delivery_addresses.delivery_address\", user=seller, _quantity=2", "import baker from delivery_addresses.models import DeliveryAddress pytestmark = pytest.mark.django_db def test_get_own_delivery_addresses(client_seller, seller, buyer):", "_quantity=2 ) response = client_seller.get(\"/delivery_addresses\") assert response.json() == [ { \"city\": address_1.city, \"companyName\":", ") response = client_seller.get(\"/delivery_addresses\") assert response.json() == [ { \"city\": address_1.city, \"companyName\": address_1.company_name,", "def test_partially_edit_delivery_address(client_seller, seller, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) data = { \"city\": faker.city(),", "): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) response = client_buyer.patch( f\"/delivery_addresses/{address.id}\", {\"city\": faker.city()} ) assert", "user=seller, _quantity=2 ) response = client_seller.get(\"/delivery_addresses\") assert response.json() == [ { \"city\": address_1.city,", "test_delete_delivery_address(client_seller, seller, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) response = client_seller.delete(f\"/delivery_addresses/{address.id}\") with pytest.raises(DeliveryAddress.DoesNotExist): address.refresh_from_db()", "{ \"city\": faker.city(), \"companyName\": faker.company(), \"street\": faker.street_address(), \"zip\": faker.zipcode(), } response = client_seller.put(f\"/delivery_addresses/{address.id}\",", "test_get_own_delivery_address(client_seller, seller): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) response = client_seller.get(f\"/delivery_addresses/{address.id}\") assert response.json() == {", "\"street\": faker.street_address(), 
\"zip\": faker.zipcode(), } response = client_seller.post(\"/delivery_addresses\", data) assert response.json() == {", "faker.company(), \"street\": faker.street_address(), \"zip\": faker.zipcode(), } response = client_seller.put(f\"/delivery_addresses/{address.id}\", data) assert response.json() ==", "assert response.status_code == 404 def test_add_delivery_address(client_seller, seller, faker): data = { \"city\": faker.city(),", "faker.city(), \"companyName\": faker.company(), \"street\": faker.street_address(), \"zip\": faker.zipcode(), } for key, value in data.items():", "address_2 = baker.make_recipe( \"delivery_addresses.delivery_address\", user=seller, _quantity=2 ) response = client_seller.get(\"/delivery_addresses\") assert response.json() ==", "response = client_seller.put(f\"/delivery_addresses/{address.id}\", data) assert response.json() == { \"id\": address.id, \"companyName\": data[\"companyName\"], \"street\":", "\"companyName\": faker.company(), \"street\": faker.street_address(), \"zip\": faker.zipcode(), } for key, value in data.items(): response", "faker.company(), \"street\": faker.street_address(), \"zip\": faker.zipcode(), } for key, value in data.items(): response =", "address.street, \"zip\": address.zip, } def test_get_someone_else_delivery_address(client_seller, buyer): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=buyer) response =", "assert response.status_code == 404 def test_partially_edit_delivery_address(client_seller, seller, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) data", "test_get_own_delivery_addresses(client_seller, seller, buyer): baker.make_recipe(\"delivery_addresses.delivery_address\", user=buyer) address_1, address_2 = baker.make_recipe( \"delivery_addresses.delivery_address\", user=seller, _quantity=2 )", "\"companyName\": address.company_name, \"id\": address.id, \"street\": address.street, \"zip\": address.zip, } def 
test_get_someone_else_delivery_address(client_seller, buyer): address", "\"companyName\": faker.company(), \"street\": faker.street_address(), \"zip\": faker.zipcode(), } response = client_seller.put(f\"/delivery_addresses/{address.id}\", data) assert response.json()", "== { \"id\": DeliveryAddress.objects.first().id, \"companyName\": data[\"companyName\"], \"street\": data[\"street\"], \"zip\": data[\"zip\"], \"city\": data[\"city\"], }", "test_partially_edit_someone_else_delivery_address( client_buyer, seller, buyer, faker ): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) response = client_buyer.patch(", "== value def test_partially_edit_someone_else_delivery_address( client_buyer, seller, buyer, faker ): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller)", "to protected related entities.\", \"code\": \"protected_error\", } def test_delete_someone_else_delivery_address(client_seller, buyer, faker): address =", "address_2.street, \"zip\": address_2.zip, }, ] def test_get_own_delivery_address(client_seller, seller): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) response", "{ \"city\": faker.city(), \"companyName\": faker.company(), \"street\": faker.street_address(), \"zip\": faker.zipcode(), } for key, value", "\"street\": address_1.street, \"zip\": address_1.zip, }, { \"city\": address_2.city, \"companyName\": address_2.company_name, \"id\": address_2.id, \"street\":", "== { \"city\": address.city, \"companyName\": address.company_name, \"id\": address.id, \"street\": address.street, \"zip\": address.zip, }", "address_1.zip, }, { \"city\": address_2.city, \"companyName\": address_2.company_name, \"id\": address_2.id, \"street\": address_2.street, \"zip\": address_2.zip,", "random import pytest from model_bakery import baker from delivery_addresses.models import DeliveryAddress pytestmark =", "seller, faker): data = { \"city\": faker.city(), \"companyName\": 
faker.company(), \"street\": faker.street_address(), \"zip\": faker.zipcode(),", "buyer, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=buyer) response = client_seller.delete(f\"/delivery_addresses/{address.id}\") assert response.status_code == 404", "\"zip\": faker.zipcode(), } response = client_buyer.put(f\"/delivery_addresses/{address.id}\", data) assert response.status_code == 404 def test_partially_edit_delivery_address(client_seller,", "response.json() == { \"id\": DeliveryAddress.objects.first().id, \"companyName\": data[\"companyName\"], \"street\": data[\"street\"], \"zip\": data[\"zip\"], \"city\": data[\"city\"],", "address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) response = client_buyer.patch( f\"/delivery_addresses/{address.id}\", {\"city\": faker.city()} ) assert response.status_code", "\"city\": faker.city(), \"companyName\": faker.company(), \"street\": faker.street_address(), \"zip\": faker.zipcode(), } response = client_seller.put(f\"/delivery_addresses/{address.id}\", data)", "def test_delete_delivery_address(client_seller, seller, faker): address = baker.make_recipe(\"delivery_addresses.delivery_address\", user=seller) response = client_seller.delete(f\"/delivery_addresses/{address.id}\") with pytest.raises(DeliveryAddress.DoesNotExist):" ]
[ "pygame.image.load(ip4).convert_alpha() def draw_barrier(self): food_obj1 = pygame.Rect(int(self.lis[0].x*cell_size),int(self.lis[0].y*cell_size),cell_size,cell_size) food_obj2 = pygame.Rect(int(self.lis[1].x*cell_size),int(self.lis[1].y*cell_size),cell_size,cell_size) food_obj3 = pygame.Rect(int(self.lis[2].x*cell_size),int(self.lis[2].y*cell_size),cell_size,cell_size) food_obj4", "food_obj4) def change_position(self): xPos1 = random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number - 2)", "= yPos1 xPos4 = xPos1 + 1 yPos4 = yPos1 + 1 self.lis", "self.pyScreen.blit(self.ft1, food_obj1) self.pyScreen.blit(self.ft2, food_obj2) self.pyScreen.blit(self.ft3, food_obj3) self.pyScreen.blit(self.ft4, food_obj4) def change_position(self): xPos1 = random.randint(0,cell_number", "1 yPos4 = yPos1 + 1 self.lis = [Vector2(xPos1,yPos1),Vector2(xPos2,yPos2),Vector2(xPos3,yPos3),Vector2(xPos4,yPos4)] self.pyScreen = screen def", "self.pyScreen.blit(self.ft3, food_obj3) self.pyScreen.blit(self.ft4, food_obj4) def change_position(self): xPos1 = random.randint(0,cell_number - 2) yPos1 =", "self.pyScreen.blit(self.ft4, food_obj4) def change_position(self): xPos1 = random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number -", "import xSize, ySize, cell_number, cell_size class NonEatable(): def __init__(self, screen, ip1,ip2,ip3,ip4): # Lade", "and yPos1 == 10): return False if(xPos1 == 7 and yPos1 == 10):", "''' self.ft1 = pygame.image.load(ip1).convert_alpha() self.ft2 = pygame.image.load(ip2).convert_alpha() self.ft3 = pygame.image.load(ip3).convert_alpha() self.ft4 = pygame.image.load(ip4).convert_alpha()", "#from .config import xSize, ySize, cell_size, cell_number from .loc_conf import xSize, ySize, cell_number,", "10): return False if(xPos1 == 7 and yPos1 == 10): return False if(xPos1", "== 6 and yPos1 == 10): return False if(xPos1 == 7 and yPos1", "yPos1 == 10): return False if(xPos1 == 7 and yPos1 == 10): return", "food_obj3 = 
pygame.Rect(int(self.lis[2].x*cell_size),int(self.lis[2].y*cell_size),cell_size,cell_size) food_obj4 = pygame.Rect(int(self.lis[3].x*cell_size),int(self.lis[3].y*cell_size),cell_size,cell_size) self.pyScreen.blit(self.ft1, food_obj1) self.pyScreen.blit(self.ft2, food_obj2) self.pyScreen.blit(self.ft3, food_obj3) self.pyScreen.blit(self.ft4,", "= random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number - 2) xPos2 = xPos1 yPos2", "2) xPos2 = xPos1 yPos2 = yPos1 + 1 xPos3 = xPos1 +", "== 10): return False if(xPos1 == 7 and yPos1 == 10): return False", "ip1,ip2,ip3,ip4): ''' Laden der Texutren ''' self.ft1 = pygame.image.load(ip1).convert_alpha() self.ft2 = pygame.image.load(ip2).convert_alpha() self.ft3", "= random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number - 2) while(not self.is_start_pos_ok(xPos1,yPos1)): xPos1 =", "= xPos1 + 1 yPos3 = yPos1 xPos4 = xPos1 + 1 yPos4", "- 2) yPos1 = random.randint(0,cell_number - 2) xPos2 = xPos1 yPos2 = yPos1", "= xPos1 + 1 yPos4 = yPos1 + 1 self.lis = [Vector2(xPos1,yPos1),Vector2(xPos2,yPos2),Vector2(xPos3,yPos3),Vector2(xPos4,yPos4)] self.pyScreen", "from .loc_conf import xSize, ySize, cell_number, cell_size class NonEatable(): def __init__(self, screen, ip1,ip2,ip3,ip4):", "+ 1 self.lis = [Vector2(xPos1,yPos1),Vector2(xPos2,yPos2),Vector2(xPos3,yPos3),Vector2(xPos4,yPos4)] self.pyScreen = screen def is_start_pos_ok(self,xPos1,yPos1): if(xPos1 == 6", "if(xPos1 == 6 and yPos1 == 10): return False if(xPos1 == 7 and", "== 10): return False if(xPos1 == 8 and yPos1 == 10): return False", "draw_barrier(self): food_obj1 = pygame.Rect(int(self.lis[0].x*cell_size),int(self.lis[0].y*cell_size),cell_size,cell_size) food_obj2 = pygame.Rect(int(self.lis[1].x*cell_size),int(self.lis[1].y*cell_size),cell_size,cell_size) food_obj3 = pygame.Rect(int(self.lis[2].x*cell_size),int(self.lis[2].y*cell_size),cell_size,cell_size) food_obj4 = 
pygame.Rect(int(self.lis[3].x*cell_size),int(self.lis[3].y*cell_size),cell_size,cell_size)", "xPos1 = random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number - 2) xPos2 = xPos1", "yPos4 = yPos1 + 1 self.lis = [Vector2(xPos1,yPos1),Vector2(xPos2,yPos2),Vector2(xPos3,yPos3),Vector2(xPos4,yPos4)] self.pyScreen = screen def is_start_pos_ok(self,xPos1,yPos1):", "Texutren ''' self.ft1 = pygame.image.load(ip1).convert_alpha() self.ft2 = pygame.image.load(ip2).convert_alpha() self.ft3 = pygame.image.load(ip3).convert_alpha() self.ft4 =", "yPos2 = yPos1 + 1 xPos3 = xPos1 + 1 yPos3 = yPos1", "random from pygame.math import Vector2 #from .config import xSize, ySize, cell_size, cell_number from", "import random from pygame.math import Vector2 #from .config import xSize, ySize, cell_size, cell_number", "Futter xPos1 = random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number - 2) while(not self.is_start_pos_ok(xPos1,yPos1)):", "7 and yPos1 == 10): return False if(xPos1 == 8 and yPos1 ==", "== 8 and yPos1 == 10): return False return True def _load_texture(self, ip1,ip2,ip3,ip4):", "def change_position(self): xPos1 = random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number - 2) self.lis", "+ 1 yPos4 = yPos1 + 1 self.lis = [Vector2(xPos1,yPos1),Vector2(xPos2,yPos2),Vector2(xPos3,yPos3),Vector2(xPos4,yPos4)] self.pyScreen = screen", "der Texutren ''' self.ft1 = pygame.image.load(ip1).convert_alpha() self.ft2 = pygame.image.load(ip2).convert_alpha() self.ft3 = pygame.image.load(ip3).convert_alpha() self.ft4", "Vector2 #from .config import xSize, ySize, cell_size, cell_number from .loc_conf import xSize, ySize,", "ySize, cell_size, cell_number from .loc_conf import xSize, ySize, cell_number, cell_size class NonEatable(): def", "False if(xPos1 == 7 and yPos1 == 10): return False if(xPos1 == 8", "xPos1 yPos2 = yPos1 + 1 xPos3 = xPos1 + 1 yPos3 =", "== 7 and yPos1 == 10): return False if(xPos1 == 8 and yPos1", 
"[Vector2(xPos1,yPos1),Vector2(xPos2,yPos2),Vector2(xPos3,yPos3),Vector2(xPos4,yPos4)] self.pyScreen = screen def is_start_pos_ok(self,xPos1,yPos1): if(xPos1 == 6 and yPos1 == 10):", "xPos2 = xPos1 yPos2 = yPos1 + 1 xPos3 = xPos1 + 1", "+ 1 xPos3 = xPos1 + 1 yPos3 = yPos1 xPos4 = xPos1", "self.pyScreen.blit(self.ft2, food_obj2) self.pyScreen.blit(self.ft3, food_obj3) self.pyScreen.blit(self.ft4, food_obj4) def change_position(self): xPos1 = random.randint(0,cell_number - 2)", "= pygame.Rect(int(self.lis[3].x*cell_size),int(self.lis[3].y*cell_size),cell_size,cell_size) self.pyScreen.blit(self.ft1, food_obj1) self.pyScreen.blit(self.ft2, food_obj2) self.pyScreen.blit(self.ft3, food_obj3) self.pyScreen.blit(self.ft4, food_obj4) def change_position(self): xPos1", "pygame.Rect(int(self.lis[0].x*cell_size),int(self.lis[0].y*cell_size),cell_size,cell_size) food_obj2 = pygame.Rect(int(self.lis[1].x*cell_size),int(self.lis[1].y*cell_size),cell_size,cell_size) food_obj3 = pygame.Rect(int(self.lis[2].x*cell_size),int(self.lis[2].y*cell_size),cell_size,cell_size) food_obj4 = pygame.Rect(int(self.lis[3].x*cell_size),int(self.lis[3].y*cell_size),cell_size,cell_size) self.pyScreen.blit(self.ft1, food_obj1) self.pyScreen.blit(self.ft2,", "import Vector2 #from .config import xSize, ySize, cell_size, cell_number from .loc_conf import xSize,", "- 2) yPos1 = random.randint(0,cell_number - 2) while(not self.is_start_pos_ok(xPos1,yPos1)): xPos1 = random.randint(0,cell_number -", "self.lis = [Vector2(xPos1,yPos1),Vector2(xPos2,yPos2),Vector2(xPos3,yPos3),Vector2(xPos4,yPos4)] self.pyScreen = screen def is_start_pos_ok(self,xPos1,yPos1): if(xPos1 == 6 and yPos1", "while(not self.is_start_pos_ok(xPos1,yPos1)): xPos1 = random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number - 2) xPos2", "ySize, cell_number, cell_size class NonEatable(): def __init__(self, screen, ip1,ip2,ip3,ip4): # Lade Textur self._load_texture(ip1,ip2,ip3,ip4)", "xPos4 = xPos1 + 1 yPos4 = yPos1 + 1 
self.lis = [Vector2(xPos1,yPos1),Vector2(xPos2,yPos2),Vector2(xPos3,yPos3),Vector2(xPos4,yPos4)]", "6 and yPos1 == 10): return False if(xPos1 == 7 and yPos1 ==", "= [Vector2(xPos1,yPos1),Vector2(xPos2,yPos2),Vector2(xPos3,yPos3),Vector2(xPos4,yPos4)] self.pyScreen = screen def is_start_pos_ok(self,xPos1,yPos1): if(xPos1 == 6 and yPos1 ==", "False if(xPos1 == 8 and yPos1 == 10): return False return True def", "def is_start_pos_ok(self,xPos1,yPos1): if(xPos1 == 6 and yPos1 == 10): return False if(xPos1 ==", "10): return False if(xPos1 == 8 and yPos1 == 10): return False return", "random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number - 2) while(not self.is_start_pos_ok(xPos1,yPos1)): xPos1 = random.randint(0,cell_number", "= yPos1 + 1 xPos3 = xPos1 + 1 yPos3 = yPos1 xPos4", "pygame.math import Vector2 #from .config import xSize, ySize, cell_size, cell_number from .loc_conf import", "import pygame import random from pygame.math import Vector2 #from .config import xSize, ySize,", "xPos1 + 1 yPos4 = yPos1 + 1 self.lis = [Vector2(xPos1,yPos1),Vector2(xPos2,yPos2),Vector2(xPos3,yPos3),Vector2(xPos4,yPos4)] self.pyScreen =", ".config import xSize, ySize, cell_size, cell_number from .loc_conf import xSize, ySize, cell_number, cell_size", "return False if(xPos1 == 7 and yPos1 == 10): return False if(xPos1 ==", "def _load_texture(self, ip1,ip2,ip3,ip4): ''' Laden der Texutren ''' self.ft1 = pygame.image.load(ip1).convert_alpha() self.ft2 =", "yPos1 xPos4 = xPos1 + 1 yPos4 = yPos1 + 1 self.lis =", "def __init__(self, screen, ip1,ip2,ip3,ip4): # Lade Textur self._load_texture(ip1,ip2,ip3,ip4) # Zufällige Koordinaten für Futter", "= yPos1 + 1 self.lis = [Vector2(xPos1,yPos1),Vector2(xPos2,yPos2),Vector2(xPos3,yPos3),Vector2(xPos4,yPos4)] self.pyScreen = screen def is_start_pos_ok(self,xPos1,yPos1): if(xPos1", "1 xPos3 = xPos1 + 1 yPos3 = yPos1 xPos4 = xPos1 +", "and yPos1 == 10): return False return True def _load_texture(self, ip1,ip2,ip3,ip4): ''' Laden", 
"random.randint(0,cell_number - 2) while(not self.is_start_pos_ok(xPos1,yPos1)): xPos1 = random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number", "cell_number from .loc_conf import xSize, ySize, cell_number, cell_size class NonEatable(): def __init__(self, screen,", "screen def is_start_pos_ok(self,xPos1,yPos1): if(xPos1 == 6 and yPos1 == 10): return False if(xPos1", "__init__(self, screen, ip1,ip2,ip3,ip4): # Lade Textur self._load_texture(ip1,ip2,ip3,ip4) # Zufällige Koordinaten für Futter xPos1", "# Lade Textur self._load_texture(ip1,ip2,ip3,ip4) # Zufällige Koordinaten für Futter xPos1 = random.randint(0,cell_number -", "yPos1 + 1 self.lis = [Vector2(xPos1,yPos1),Vector2(xPos2,yPos2),Vector2(xPos3,yPos3),Vector2(xPos4,yPos4)] self.pyScreen = screen def is_start_pos_ok(self,xPos1,yPos1): if(xPos1 ==", "import xSize, ySize, cell_size, cell_number from .loc_conf import xSize, ySize, cell_number, cell_size class", "pygame.image.load(ip1).convert_alpha() self.ft2 = pygame.image.load(ip2).convert_alpha() self.ft3 = pygame.image.load(ip3).convert_alpha() self.ft4 = pygame.image.load(ip4).convert_alpha() def draw_barrier(self): food_obj1", "yPos1 == 10): return False if(xPos1 == 8 and yPos1 == 10): return", "- 2) xPos2 = xPos1 yPos2 = yPos1 + 1 xPos3 = xPos1", "xSize, ySize, cell_size, cell_number from .loc_conf import xSize, ySize, cell_number, cell_size class NonEatable():", "2) yPos1 = random.randint(0,cell_number - 2) while(not self.is_start_pos_ok(xPos1,yPos1)): xPos1 = random.randint(0,cell_number - 2)", "cell_size class NonEatable(): def __init__(self, screen, ip1,ip2,ip3,ip4): # Lade Textur self._load_texture(ip1,ip2,ip3,ip4) # Zufällige", "class NonEatable(): def __init__(self, screen, ip1,ip2,ip3,ip4): # Lade Textur self._load_texture(ip1,ip2,ip3,ip4) # Zufällige Koordinaten", "xPos1 + 1 yPos3 = yPos1 xPos4 = xPos1 + 1 yPos4 =", "_load_texture(self, ip1,ip2,ip3,ip4): ''' Laden der Texutren ''' self.ft1 = pygame.image.load(ip1).convert_alpha() 
self.ft2 = pygame.image.load(ip2).convert_alpha()", "= pygame.Rect(int(self.lis[2].x*cell_size),int(self.lis[2].y*cell_size),cell_size,cell_size) food_obj4 = pygame.Rect(int(self.lis[3].x*cell_size),int(self.lis[3].y*cell_size),cell_size,cell_size) self.pyScreen.blit(self.ft1, food_obj1) self.pyScreen.blit(self.ft2, food_obj2) self.pyScreen.blit(self.ft3, food_obj3) self.pyScreen.blit(self.ft4, food_obj4)", "xPos3 = xPos1 + 1 yPos3 = yPos1 xPos4 = xPos1 + 1", "- 2) while(not self.is_start_pos_ok(xPos1,yPos1)): xPos1 = random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number -", "random.randint(0,cell_number - 2) xPos2 = xPos1 yPos2 = yPos1 + 1 xPos3 =", "cell_number, cell_size class NonEatable(): def __init__(self, screen, ip1,ip2,ip3,ip4): # Lade Textur self._load_texture(ip1,ip2,ip3,ip4) #", "= pygame.image.load(ip2).convert_alpha() self.ft3 = pygame.image.load(ip3).convert_alpha() self.ft4 = pygame.image.load(ip4).convert_alpha() def draw_barrier(self): food_obj1 = pygame.Rect(int(self.lis[0].x*cell_size),int(self.lis[0].y*cell_size),cell_size,cell_size)", "pygame.Rect(int(self.lis[2].x*cell_size),int(self.lis[2].y*cell_size),cell_size,cell_size) food_obj4 = pygame.Rect(int(self.lis[3].x*cell_size),int(self.lis[3].y*cell_size),cell_size,cell_size) self.pyScreen.blit(self.ft1, food_obj1) self.pyScreen.blit(self.ft2, food_obj2) self.pyScreen.blit(self.ft3, food_obj3) self.pyScreen.blit(self.ft4, food_obj4) def", "yPos1 = random.randint(0,cell_number - 2) while(not self.is_start_pos_ok(xPos1,yPos1)): xPos1 = random.randint(0,cell_number - 2) yPos1", "= pygame.image.load(ip1).convert_alpha() self.ft2 = pygame.image.load(ip2).convert_alpha() self.ft3 = pygame.image.load(ip3).convert_alpha() self.ft4 = pygame.image.load(ip4).convert_alpha() def draw_barrier(self):", "self.is_start_pos_ok(xPos1,yPos1)): xPos1 = random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number - 2) xPos2 =", "self.ft3 = pygame.image.load(ip3).convert_alpha() self.ft4 
= pygame.image.load(ip4).convert_alpha() def draw_barrier(self): food_obj1 = pygame.Rect(int(self.lis[0].x*cell_size),int(self.lis[0].y*cell_size),cell_size,cell_size) food_obj2 =", "NonEatable(): def __init__(self, screen, ip1,ip2,ip3,ip4): # Lade Textur self._load_texture(ip1,ip2,ip3,ip4) # Zufällige Koordinaten für", "screen, ip1,ip2,ip3,ip4): # Lade Textur self._load_texture(ip1,ip2,ip3,ip4) # Zufällige Koordinaten für Futter xPos1 =", "self.ft1 = pygame.image.load(ip1).convert_alpha() self.ft2 = pygame.image.load(ip2).convert_alpha() self.ft3 = pygame.image.load(ip3).convert_alpha() self.ft4 = pygame.image.load(ip4).convert_alpha() def", "für Futter xPos1 = random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number - 2) while(not", "= xPos1 yPos2 = yPos1 + 1 xPos3 = xPos1 + 1 yPos3", "10): return False return True def _load_texture(self, ip1,ip2,ip3,ip4): ''' Laden der Texutren '''", "food_obj1) self.pyScreen.blit(self.ft2, food_obj2) self.pyScreen.blit(self.ft3, food_obj3) self.pyScreen.blit(self.ft4, food_obj4) def change_position(self): xPos1 = random.randint(0,cell_number -", "cell_size, cell_number from .loc_conf import xSize, ySize, cell_number, cell_size class NonEatable(): def __init__(self,", "# Zufällige Koordinaten für Futter xPos1 = random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number", "pygame.image.load(ip2).convert_alpha() self.ft3 = pygame.image.load(ip3).convert_alpha() self.ft4 = pygame.image.load(ip4).convert_alpha() def draw_barrier(self): food_obj1 = pygame.Rect(int(self.lis[0].x*cell_size),int(self.lis[0].y*cell_size),cell_size,cell_size) food_obj2", "self._load_texture(ip1,ip2,ip3,ip4) # Zufällige Koordinaten für Futter xPos1 = random.randint(0,cell_number - 2) yPos1 =", "pygame.Rect(int(self.lis[3].x*cell_size),int(self.lis[3].y*cell_size),cell_size,cell_size) self.pyScreen.blit(self.ft1, food_obj1) self.pyScreen.blit(self.ft2, food_obj2) self.pyScreen.blit(self.ft3, food_obj3) self.pyScreen.blit(self.ft4, 
food_obj4) def change_position(self): xPos1 =", "yPos1 = random.randint(0,cell_number - 2) xPos2 = xPos1 yPos2 = yPos1 + 1", "food_obj1 = pygame.Rect(int(self.lis[0].x*cell_size),int(self.lis[0].y*cell_size),cell_size,cell_size) food_obj2 = pygame.Rect(int(self.lis[1].x*cell_size),int(self.lis[1].y*cell_size),cell_size,cell_size) food_obj3 = pygame.Rect(int(self.lis[2].x*cell_size),int(self.lis[2].y*cell_size),cell_size,cell_size) food_obj4 = pygame.Rect(int(self.lis[3].x*cell_size),int(self.lis[3].y*cell_size),cell_size,cell_size) self.pyScreen.blit(self.ft1,", "''' Laden der Texutren ''' self.ft1 = pygame.image.load(ip1).convert_alpha() self.ft2 = pygame.image.load(ip2).convert_alpha() self.ft3 =", "Koordinaten für Futter xPos1 = random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number - 2)", "is_start_pos_ok(self,xPos1,yPos1): if(xPos1 == 6 and yPos1 == 10): return False if(xPos1 == 7", "food_obj4 = pygame.Rect(int(self.lis[3].x*cell_size),int(self.lis[3].y*cell_size),cell_size,cell_size) self.pyScreen.blit(self.ft1, food_obj1) self.pyScreen.blit(self.ft2, food_obj2) self.pyScreen.blit(self.ft3, food_obj3) self.pyScreen.blit(self.ft4, food_obj4) def change_position(self):", "xPos1 = random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number - 2) self.lis = [Vector2(xPos1,yPos1),Vector2(xPos1,yPos1+1),Vector2(xPos1+1,yPos1),Vector2(xPos1+1,yPos1+1)]", "pygame.image.load(ip3).convert_alpha() self.ft4 = pygame.image.load(ip4).convert_alpha() def draw_barrier(self): food_obj1 = pygame.Rect(int(self.lis[0].x*cell_size),int(self.lis[0].y*cell_size),cell_size,cell_size) food_obj2 = pygame.Rect(int(self.lis[1].x*cell_size),int(self.lis[1].y*cell_size),cell_size,cell_size) food_obj3", "1 self.lis = [Vector2(xPos1,yPos1),Vector2(xPos2,yPos2),Vector2(xPos3,yPos3),Vector2(xPos4,yPos4)] self.pyScreen = screen def is_start_pos_ok(self,xPos1,yPos1): if(xPos1 == 6 and", "yPos3 = yPos1 xPos4 = xPos1 + 1 yPos4 = yPos1 + 1", "if(xPos1 == 8 and yPos1 == 
10): return False return True def _load_texture(self,", "Lade Textur self._load_texture(ip1,ip2,ip3,ip4) # Zufällige Koordinaten für Futter xPos1 = random.randint(0,cell_number - 2)", "return False return True def _load_texture(self, ip1,ip2,ip3,ip4): ''' Laden der Texutren ''' self.ft1", "Zufällige Koordinaten für Futter xPos1 = random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number -", "random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number - 2) xPos2 = xPos1 yPos2 =", "= pygame.image.load(ip3).convert_alpha() self.ft4 = pygame.image.load(ip4).convert_alpha() def draw_barrier(self): food_obj1 = pygame.Rect(int(self.lis[0].x*cell_size),int(self.lis[0].y*cell_size),cell_size,cell_size) food_obj2 = pygame.Rect(int(self.lis[1].x*cell_size),int(self.lis[1].y*cell_size),cell_size,cell_size)", "= pygame.Rect(int(self.lis[1].x*cell_size),int(self.lis[1].y*cell_size),cell_size,cell_size) food_obj3 = pygame.Rect(int(self.lis[2].x*cell_size),int(self.lis[2].y*cell_size),cell_size,cell_size) food_obj4 = pygame.Rect(int(self.lis[3].x*cell_size),int(self.lis[3].y*cell_size),cell_size,cell_size) self.pyScreen.blit(self.ft1, food_obj1) self.pyScreen.blit(self.ft2, food_obj2) self.pyScreen.blit(self.ft3,", "if(xPos1 == 7 and yPos1 == 10): return False if(xPos1 == 8 and", "xSize, ySize, cell_number, cell_size class NonEatable(): def __init__(self, screen, ip1,ip2,ip3,ip4): # Lade Textur", "self.ft2 = pygame.image.load(ip2).convert_alpha() self.ft3 = pygame.image.load(ip3).convert_alpha() self.ft4 = pygame.image.load(ip4).convert_alpha() def draw_barrier(self): food_obj1 =", "pygame.Rect(int(self.lis[1].x*cell_size),int(self.lis[1].y*cell_size),cell_size,cell_size) food_obj3 = pygame.Rect(int(self.lis[2].x*cell_size),int(self.lis[2].y*cell_size),cell_size,cell_size) food_obj4 = pygame.Rect(int(self.lis[3].x*cell_size),int(self.lis[3].y*cell_size),cell_size,cell_size) self.pyScreen.blit(self.ft1, food_obj1) self.pyScreen.blit(self.ft2, food_obj2) 
self.pyScreen.blit(self.ft3, food_obj3)", "yPos1 == 10): return False return True def _load_texture(self, ip1,ip2,ip3,ip4): ''' Laden der", ".loc_conf import xSize, ySize, cell_number, cell_size class NonEatable(): def __init__(self, screen, ip1,ip2,ip3,ip4): #", "Laden der Texutren ''' self.ft1 = pygame.image.load(ip1).convert_alpha() self.ft2 = pygame.image.load(ip2).convert_alpha() self.ft3 = pygame.image.load(ip3).convert_alpha()", "return False if(xPos1 == 8 and yPos1 == 10): return False return True", "food_obj2 = pygame.Rect(int(self.lis[1].x*cell_size),int(self.lis[1].y*cell_size),cell_size,cell_size) food_obj3 = pygame.Rect(int(self.lis[2].x*cell_size),int(self.lis[2].y*cell_size),cell_size,cell_size) food_obj4 = pygame.Rect(int(self.lis[3].x*cell_size),int(self.lis[3].y*cell_size),cell_size,cell_size) self.pyScreen.blit(self.ft1, food_obj1) self.pyScreen.blit(self.ft2, food_obj2)", "= pygame.Rect(int(self.lis[0].x*cell_size),int(self.lis[0].y*cell_size),cell_size,cell_size) food_obj2 = pygame.Rect(int(self.lis[1].x*cell_size),int(self.lis[1].y*cell_size),cell_size,cell_size) food_obj3 = pygame.Rect(int(self.lis[2].x*cell_size),int(self.lis[2].y*cell_size),cell_size,cell_size) food_obj4 = pygame.Rect(int(self.lis[3].x*cell_size),int(self.lis[3].y*cell_size),cell_size,cell_size) self.pyScreen.blit(self.ft1, food_obj1)", "self.ft4 = pygame.image.load(ip4).convert_alpha() def draw_barrier(self): food_obj1 = pygame.Rect(int(self.lis[0].x*cell_size),int(self.lis[0].y*cell_size),cell_size,cell_size) food_obj2 = pygame.Rect(int(self.lis[1].x*cell_size),int(self.lis[1].y*cell_size),cell_size,cell_size) food_obj3 =", "+ 1 yPos3 = yPos1 xPos4 = xPos1 + 1 yPos4 = yPos1", "True def _load_texture(self, ip1,ip2,ip3,ip4): ''' Laden der Texutren ''' self.ft1 = pygame.image.load(ip1).convert_alpha() self.ft2", "def draw_barrier(self): food_obj1 = pygame.Rect(int(self.lis[0].x*cell_size),int(self.lis[0].y*cell_size),cell_size,cell_size) food_obj2 = 
pygame.Rect(int(self.lis[1].x*cell_size),int(self.lis[1].y*cell_size),cell_size,cell_size) food_obj3 = pygame.Rect(int(self.lis[2].x*cell_size),int(self.lis[2].y*cell_size),cell_size,cell_size) food_obj4 =", "== 10): return False return True def _load_texture(self, ip1,ip2,ip3,ip4): ''' Laden der Texutren", "= pygame.image.load(ip4).convert_alpha() def draw_barrier(self): food_obj1 = pygame.Rect(int(self.lis[0].x*cell_size),int(self.lis[0].y*cell_size),cell_size,cell_size) food_obj2 = pygame.Rect(int(self.lis[1].x*cell_size),int(self.lis[1].y*cell_size),cell_size,cell_size) food_obj3 = pygame.Rect(int(self.lis[2].x*cell_size),int(self.lis[2].y*cell_size),cell_size,cell_size)", "= screen def is_start_pos_ok(self,xPos1,yPos1): if(xPos1 == 6 and yPos1 == 10): return False", "food_obj2) self.pyScreen.blit(self.ft3, food_obj3) self.pyScreen.blit(self.ft4, food_obj4) def change_position(self): xPos1 = random.randint(0,cell_number - 2) yPos1", "= random.randint(0,cell_number - 2) xPos2 = xPos1 yPos2 = yPos1 + 1 xPos3", "return True def _load_texture(self, ip1,ip2,ip3,ip4): ''' Laden der Texutren ''' self.ft1 = pygame.image.load(ip1).convert_alpha()", "yPos1 + 1 xPos3 = xPos1 + 1 yPos3 = yPos1 xPos4 =", "self.pyScreen = screen def is_start_pos_ok(self,xPos1,yPos1): if(xPos1 == 6 and yPos1 == 10): return", "8 and yPos1 == 10): return False return True def _load_texture(self, ip1,ip2,ip3,ip4): '''", "False return True def _load_texture(self, ip1,ip2,ip3,ip4): ''' Laden der Texutren ''' self.ft1 =", "food_obj3) self.pyScreen.blit(self.ft4, food_obj4) def change_position(self): xPos1 = random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number", "change_position(self): xPos1 = random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number - 2) self.lis =", "pygame import random from pygame.math import Vector2 #from .config import xSize, ySize, cell_size,", "ip1,ip2,ip3,ip4): # Lade Textur self._load_texture(ip1,ip2,ip3,ip4) # Zufällige Koordinaten für 
Futter xPos1 = random.randint(0,cell_number", "xPos1 = random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number - 2) while(not self.is_start_pos_ok(xPos1,yPos1)): xPos1", "2) yPos1 = random.randint(0,cell_number - 2) xPos2 = xPos1 yPos2 = yPos1 +", "2) while(not self.is_start_pos_ok(xPos1,yPos1)): xPos1 = random.randint(0,cell_number - 2) yPos1 = random.randint(0,cell_number - 2)", "1 yPos3 = yPos1 xPos4 = xPos1 + 1 yPos4 = yPos1 +", "and yPos1 == 10): return False if(xPos1 == 8 and yPos1 == 10):", "= random.randint(0,cell_number - 2) while(not self.is_start_pos_ok(xPos1,yPos1)): xPos1 = random.randint(0,cell_number - 2) yPos1 =", "from pygame.math import Vector2 #from .config import xSize, ySize, cell_size, cell_number from .loc_conf", "Textur self._load_texture(ip1,ip2,ip3,ip4) # Zufällige Koordinaten für Futter xPos1 = random.randint(0,cell_number - 2) yPos1" ]
[ "if len(passtotake) < 8: passtotake = nametotake[:nametotake.index('.')] +passtotake pyautogui.write(nametotake) pyautogui.press(\"TAB\") pyautogui.write(passtotake) pyautogui.press(\"ENTER\") sleep(1)", "[i[:-1] for i in passwords if passwords.index(i) % 2 == 0] for i", "pyautogui from time import sleep from random import choice sleep(3) names = open(\"names.txt\",\"r\").readlines()", "% 2 == 0] for i in range(100): print(\"hehe :) \") pyautogui.click() nametotake", "names] passwords = open(\"pass.txt\",'r').readlines() passwords = [i[:-1] for i in passwords if passwords.index(i)", "passwords if passwords.index(i) % 2 == 0] for i in range(100): print(\"hehe :)", "passwords = open(\"pass.txt\",'r').readlines() passwords = [i[:-1] for i in passwords if passwords.index(i) %", "print(\"hehe :) \") pyautogui.click() nametotake = names[i+100] passtotake = choice(passwords) if len(passtotake) <", "passtotake = nametotake[:nametotake.index('.')] +passtotake pyautogui.write(nametotake) pyautogui.press(\"TAB\") pyautogui.write(passtotake) pyautogui.press(\"ENTER\") sleep(1) with pyautogui.hold('alt'): pyautogui.press('left') print(nametotake)", "names[i+100] passtotake = choice(passwords) if len(passtotake) < 8: passtotake = nametotake[:nametotake.index('.')] +passtotake pyautogui.write(nametotake)", "open(\"names.txt\",\"r\").readlines() names = [i[:-1] for i in names] passwords = open(\"pass.txt\",'r').readlines() passwords =", "= [i[:-1] for i in passwords if passwords.index(i) % 2 == 0] for", "time import sleep from random import choice sleep(3) names = open(\"names.txt\",\"r\").readlines() names =", "i in names] passwords = open(\"pass.txt\",'r').readlines() passwords = [i[:-1] for i in passwords", "\") pyautogui.click() nametotake = names[i+100] passtotake = choice(passwords) if len(passtotake) < 8: passtotake", "import sleep from random import choice sleep(3) names = open(\"names.txt\",\"r\").readlines() names = [i[:-1]", "passtotake = choice(passwords) if 
len(passtotake) < 8: passtotake = nametotake[:nametotake.index('.')] +passtotake pyautogui.write(nametotake) pyautogui.press(\"TAB\")", "= [i[:-1] for i in names] passwords = open(\"pass.txt\",'r').readlines() passwords = [i[:-1] for", "choice sleep(3) names = open(\"names.txt\",\"r\").readlines() names = [i[:-1] for i in names] passwords", "nametotake[:nametotake.index('.')] +passtotake pyautogui.write(nametotake) pyautogui.press(\"TAB\") pyautogui.write(passtotake) pyautogui.press(\"ENTER\") sleep(1) with pyautogui.hold('alt'): pyautogui.press('left') print(nametotake) print(passtotake) print(\"Done\\n\\n\")", "= names[i+100] passtotake = choice(passwords) if len(passtotake) < 8: passtotake = nametotake[:nametotake.index('.')] +passtotake", "for i in passwords if passwords.index(i) % 2 == 0] for i in", "pyautogui.click() nametotake = names[i+100] passtotake = choice(passwords) if len(passtotake) < 8: passtotake =", "0] for i in range(100): print(\"hehe :) \") pyautogui.click() nametotake = names[i+100] passtotake", "+passtotake pyautogui.write(nametotake) pyautogui.press(\"TAB\") pyautogui.write(passtotake) pyautogui.press(\"ENTER\") sleep(1) with pyautogui.hold('alt'): pyautogui.press('left') print(nametotake) print(passtotake) print(\"Done\\n\\n\") sleep(6)", "from time import sleep from random import choice sleep(3) names = open(\"names.txt\",\"r\").readlines() names", "2 == 0] for i in range(100): print(\"hehe :) \") pyautogui.click() nametotake =", "= choice(passwords) if len(passtotake) < 8: passtotake = nametotake[:nametotake.index('.')] +passtotake pyautogui.write(nametotake) pyautogui.press(\"TAB\") pyautogui.write(passtotake)", "names = open(\"names.txt\",\"r\").readlines() names = [i[:-1] for i in names] passwords = open(\"pass.txt\",'r').readlines()", "= nametotake[:nametotake.index('.')] +passtotake pyautogui.write(nametotake) pyautogui.press(\"TAB\") pyautogui.write(passtotake) pyautogui.press(\"ENTER\") sleep(1) with pyautogui.hold('alt'): 
pyautogui.press('left') print(nametotake) print(passtotake)", "= open(\"pass.txt\",'r').readlines() passwords = [i[:-1] for i in passwords if passwords.index(i) % 2", "[i[:-1] for i in names] passwords = open(\"pass.txt\",'r').readlines() passwords = [i[:-1] for i", "in passwords if passwords.index(i) % 2 == 0] for i in range(100): print(\"hehe", "import choice sleep(3) names = open(\"names.txt\",\"r\").readlines() names = [i[:-1] for i in names]", "passwords.index(i) % 2 == 0] for i in range(100): print(\"hehe :) \") pyautogui.click()", "if passwords.index(i) % 2 == 0] for i in range(100): print(\"hehe :) \")", "passwords = [i[:-1] for i in passwords if passwords.index(i) % 2 == 0]", "8: passtotake = nametotake[:nametotake.index('.')] +passtotake pyautogui.write(nametotake) pyautogui.press(\"TAB\") pyautogui.write(passtotake) pyautogui.press(\"ENTER\") sleep(1) with pyautogui.hold('alt'): pyautogui.press('left')", ":) \") pyautogui.click() nametotake = names[i+100] passtotake = choice(passwords) if len(passtotake) < 8:", "choice(passwords) if len(passtotake) < 8: passtotake = nametotake[:nametotake.index('.')] +passtotake pyautogui.write(nametotake) pyautogui.press(\"TAB\") pyautogui.write(passtotake) pyautogui.press(\"ENTER\")", "in range(100): print(\"hehe :) \") pyautogui.click() nametotake = names[i+100] passtotake = choice(passwords) if", "= open(\"names.txt\",\"r\").readlines() names = [i[:-1] for i in names] passwords = open(\"pass.txt\",'r').readlines() passwords", "import pyautogui from time import sleep from random import choice sleep(3) names =", "== 0] for i in range(100): print(\"hehe :) \") pyautogui.click() nametotake = names[i+100]", "for i in names] passwords = open(\"pass.txt\",'r').readlines() passwords = [i[:-1] for i in", "len(passtotake) < 8: passtotake = nametotake[:nametotake.index('.')] +passtotake pyautogui.write(nametotake) pyautogui.press(\"TAB\") pyautogui.write(passtotake) pyautogui.press(\"ENTER\") sleep(1) with", "nametotake = 
names[i+100] passtotake = choice(passwords) if len(passtotake) < 8: passtotake = nametotake[:nametotake.index('.')]", "from random import choice sleep(3) names = open(\"names.txt\",\"r\").readlines() names = [i[:-1] for i", "for i in range(100): print(\"hehe :) \") pyautogui.click() nametotake = names[i+100] passtotake =", "range(100): print(\"hehe :) \") pyautogui.click() nametotake = names[i+100] passtotake = choice(passwords) if len(passtotake)", "i in range(100): print(\"hehe :) \") pyautogui.click() nametotake = names[i+100] passtotake = choice(passwords)", "random import choice sleep(3) names = open(\"names.txt\",\"r\").readlines() names = [i[:-1] for i in", "in names] passwords = open(\"pass.txt\",'r').readlines() passwords = [i[:-1] for i in passwords if", "sleep(3) names = open(\"names.txt\",\"r\").readlines() names = [i[:-1] for i in names] passwords =", "sleep from random import choice sleep(3) names = open(\"names.txt\",\"r\").readlines() names = [i[:-1] for", "i in passwords if passwords.index(i) % 2 == 0] for i in range(100):", "open(\"pass.txt\",'r').readlines() passwords = [i[:-1] for i in passwords if passwords.index(i) % 2 ==", "names = [i[:-1] for i in names] passwords = open(\"pass.txt\",'r').readlines() passwords = [i[:-1]", "< 8: passtotake = nametotake[:nametotake.index('.')] +passtotake pyautogui.write(nametotake) pyautogui.press(\"TAB\") pyautogui.write(passtotake) pyautogui.press(\"ENTER\") sleep(1) with pyautogui.hold('alt'):" ]
[ "None class Client(object): def __init__(self, svc, config, registry): self.svc = svc self.config =", "GetRegistryName(self): \"\"\" Get registry table name \"\"\" return self.registry_name or 'roster' def GetHashKey(self):", "registry_name='', *args, **kwargs): self.registry_name = registry_name self.region = '' endpoint = os.getenv('DYNAMODB_ENDPOINT', '')", "def Unregister(self): self.stopHeartbeat = True class ClientConfig(object): def __init__(self, registry_name='', *args, **kwargs): self.registry_name", "when the main process ends. t.start() return self.service, None # Heartbeat function -", "else: item = Item(table, self.service.__dict__) item['Expiry'] = self.service.Expiry item.save() # Query the registry", "item['Expiry'] = self.service.Expiry item.save() # Query the registry for named service def Discover(self,", "Exception: pass return '', Exception(\"roster: No non loopback local IP address could be", "region \"\"\" self.region = region def GetRegistryName(self): \"\"\" Get registry table name \"\"\"", "in socket.gethostbyname_ex(socket.gethostname())[2]: if not ip.startswith(\"127.\"): return ip, '' except Exception: pass return '',", "random import threading from urlparse import urlparse from datetime import datetime, timedelta from", "heartbeat check t = threading.Thread(target=heartbeat_check, args=(self,)) t.daemon = True # causes the thread", "boto.dynamodb2.exceptions import ItemNotFound HEARTBEAT_INTERVAL = 1 # 1second TTL = 5 class Service(object):", "= int(time.mktime(datetime.now().timetuple())) items = self.svc.scan( self.registry.name, filter_expression = 'Expiry > :ExpiryVal AND #N", "Endpoint.get('S') if isinstance(Endpoint, dict) else Endpoint self.Expiry = int(Expiry.get('N')) if isinstance(Expiry, dict) else", "of the available endpoints (in effect load balancing between available endpoints) count =", "= Name.get('S') if isinstance(Name, dict) else Name self.Endpoint = Endpoint.get('S') if isinstance(Endpoint, dict)", "if 
self.region == '': self.region = \"us-west-2\" aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID', '') aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY', '') if self.endpoint:", "NewRegistry from boto.dynamodb2.layer1 import DynamoDBConnection from boto.dynamodb2 import connect_to_region from boto.dynamodb2.items import Item", "connect_to_region from boto.dynamodb2.items import Item from boto.dynamodb2.exceptions import ItemNotFound HEARTBEAT_INTERVAL = 1 #", "svc, config, registry): self.svc = svc self.config = config self.registry = registry @classmethod", "= table.get_item(**item_info) else: item = Item(table, self.service.__dict__) item['Expiry'] = self.service.Expiry item.save() # Query", "<filename>roster/client.py<gh_stars>1-10 import time import sys import os import signal import random import threading", "= config self.registry = registry @classmethod def new(cls, *args, **kwargs): config = ClientConfig(*args,", "self.svc.scan( self.registry.name, filter_expression = 'Expiry > :ExpiryVal AND #N = :NameVal', expression_attribute_names =", "DynamoDBConnection from boto.dynamodb2 import connect_to_region from boto.dynamodb2.items import Item from boto.dynamodb2.exceptions import ItemNotFound", "else: return connect_to_region(self.region, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key ) class CleanExit(object): def __enter__(self): return self def", "count - 1)]), None # Returns the non loopback local IP of the", "load balancing between available endpoints) count = items.get('Count') if count == 0: return", "# unix timestamp self.stopHeartbeat = int(stopHeartbeat.get('N')) if isinstance(stopHeartbeat, dict) else stopHeartbeat # Unregister", "if exc_type is KeyboardInterrupt: return True return exc_type is None class Client(object): def", "os.getenv('DYNAMODB_ENDPOINT', '') if endpoint != '': self.endpoint = endpoint self.endpoint_data = urlparse(self.endpoint) def", "not create before registration. 
if not self.registry.Exists(): table, err = self.registry.Create() if err:", "if isinstance(Endpoint, dict) else Endpoint self.Expiry = int(Expiry.get('N')) if isinstance(Expiry, dict) else Expiry", "err = self.registry.Create() if err: return None, err # Create Service self.service =", "'Expiry > :ExpiryVal AND #N = :NameVal', expression_attribute_names = { '#N': 'Name' },", "import os import signal import random import threading from urlparse import urlparse from", "def SetRegion(self, region): \"\"\" Set region \"\"\" self.region = region def GetRegistryName(self): \"\"\"", "'N': str(now) } } ) # Randomly select one of the available endpoints", "= True # causes the thread to terminate when the main process ends.", "self.endpoint = endpoint self.endpoint_data = urlparse(self.endpoint) def SetRegion(self, region): \"\"\" Set region \"\"\"", "*args, **kwargs): config = ClientConfig(*args, **kwargs) svc = config.GetConnection() registry = NewRegistry(svc, config.GetRegistryName())", "endpoint = os.getenv('DYNAMODB_ENDPOINT', '') if endpoint != '': self.endpoint = endpoint self.endpoint_data =", "if endpoint != '': self.endpoint = endpoint self.endpoint_data = urlparse(self.endpoint) def SetRegion(self, region):", "Endpoint, Expiry=None, stopHeartbeat=False, *args, **kwargs): self.Name = Name.get('S') if isinstance(Name, dict) else Name", "self.config = config self.registry = registry @classmethod def new(cls, *args, **kwargs): config =", "TTL and current time self.service.Expiry = int(time.mktime(datetime.now().timetuple())) if not terminate: self.service.Expiry += TTL", "= svc self.config = config self.registry = registry @classmethod def new(cls, *args, **kwargs):", "registry_name self.region = '' endpoint = os.getenv('DYNAMODB_ENDPOINT', '') if endpoint != '': self.endpoint", "on TTL and current time self.service.Expiry = int(time.mktime(datetime.now().timetuple())) if not terminate: self.service.Expiry +=", "Client(svc=svc, config=config, registry=registry) # 
Register the service in the registry def Register(self, name,", "Heartbeat function - updates expiry def heartbeat_check(client): # with CleanExit(): while True: if", "+= TTL table = self.registry.Table() item_info = { 'Name': self.service.Name, 'Endpoint': self.service.Endpoint }", "# Heartbeat function - updates expiry def heartbeat_check(client): # with CleanExit(): while True:", "self.region = region def GetRegistryName(self): \"\"\" Get registry table name \"\"\" return self.registry_name", "return self.registry_name or 'roster' def GetHashKey(self): return self.name def GetRangeKey(self): return self.endpoint def", "(in effect load balancing between available endpoints) count = items.get('Count') if count ==", "self.service.Endpoint } if table.has_item(**item_info): item = table.get_item(**item_info) else: item = Item(table, self.service.__dict__) item['Expiry']", "signal import random import threading from urlparse import urlparse from datetime import datetime,", "dict) else Name self.Endpoint = Endpoint.get('S') if isinstance(Endpoint, dict) else Endpoint self.Expiry =", "def __init__(self, registry_name='', *args, **kwargs): self.registry_name = registry_name self.region = '' endpoint =", "the registry has been previously created. If not create before registration. 
if not", "int(time.mktime(datetime.now().timetuple())) items = self.svc.scan( self.registry.name, filter_expression = 'Expiry > :ExpiryVal AND #N =", "aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, is_secure=False ) else: return connect_to_region(self.region, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key ) class CleanExit(object): def", "== '': self.region = os.getenv('AWS_REGION', '') # Default if self.region == '': self.region", "Get registry table name \"\"\" return self.registry_name or 'roster' def GetHashKey(self): return self.name", "threading.Thread(target=heartbeat_check, args=(self,)) t.daemon = True # causes the thread to terminate when the", "== 0: return None, Exception('roster: No matching service found') else: return Service(**items['Items'][random.randint(0, count", "if err: return None, err # Create Service self.service = Service(Name=name, Endpoint=endpoint) #", "is running on def get_local_ip(self): import socket try: for ip in socket.gethostbyname_ex(socket.gethostname())[2]: if", "endpoint): # Check whether the registry has been previously created. If not create", "Returns the non loopback local IP of the host the client is running", "def GetRegistryName(self): \"\"\" Get registry table name \"\"\" return self.registry_name or 'roster' def", "name, endpoint): # Check whether the registry has been previously created. 
If not", "t.start() return self.service, None # Heartbeat function - updates expiry def heartbeat(self, terminate=False):", "!= '': self.endpoint = endpoint self.endpoint_data = urlparse(self.endpoint) def SetRegion(self, region): \"\"\" Set", "self.service, None # Heartbeat function - updates expiry def heartbeat(self, terminate=False): # if", "\"\"\" self.region = region def GetRegistryName(self): \"\"\" Get registry table name \"\"\" return", "expiry def heartbeat_check(client): # with CleanExit(): while True: if client.service.stopHeartbeat: client.heartbeat(terminate=True) break time.sleep(HEARTBEAT_INTERVAL)", "from boto.dynamodb2.items import Item from boto.dynamodb2.exceptions import ItemNotFound HEARTBEAT_INTERVAL = 1 # 1second", "from boto.dynamodb2 import connect_to_region from boto.dynamodb2.items import Item from boto.dynamodb2.exceptions import ItemNotFound HEARTBEAT_INTERVAL", "if not self.registry.Exists(): table, err = self.registry.Create() if err: return None, err #", "= \"us-west-2\" aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID', '') aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY', '') if self.endpoint: return DynamoDBConnection( host=self.endpoint_data.hostname, port=self.endpoint_data.port, aws_access_key_id=aws_access_key_id,", "available endpoints) count = items.get('Count') if count == 0: return None, Exception('roster: No", "\"\"\" Set region \"\"\" self.region = region def GetRegistryName(self): \"\"\" Get registry table", "datetime import datetime, timedelta from registry import NewRegistry from boto.dynamodb2.layer1 import DynamoDBConnection from", "if isinstance(Name, dict) else Name self.Endpoint = Endpoint.get('S') if isinstance(Endpoint, dict) else Endpoint", "int(stopHeartbeat.get('N')) if isinstance(stopHeartbeat, dict) else stopHeartbeat # Unregister the service def Unregister(self): self.stopHeartbeat", "return True return exc_type is None class Client(object): def __init__(self, svc, config, registry):", 
"self.service.stopHeartbeat: # return # Update service Expiry based on TTL and current time", "boto.dynamodb2 import connect_to_region from boto.dynamodb2.items import Item from boto.dynamodb2.exceptions import ItemNotFound HEARTBEAT_INTERVAL =", "return Service(**items['Items'][random.randint(0, count - 1)]), None # Returns the non loopback local IP", "else Endpoint self.Expiry = int(Expiry.get('N')) if isinstance(Expiry, dict) else Expiry # unix timestamp", "and current time self.service.Expiry = int(time.mktime(datetime.now().timetuple())) if not terminate: self.service.Expiry += TTL table", "True class ClientConfig(object): def __init__(self, registry_name='', *args, **kwargs): self.registry_name = registry_name self.region =", "return None, Exception('roster: No matching service found') else: return Service(**items['Items'][random.randint(0, count - 1)]),", "return None, err # Create Service self.service = Service(Name=name, Endpoint=endpoint) # Ensure call", "} if table.has_item(**item_info): item = table.get_item(**item_info) else: item = Item(table, self.service.__dict__) item['Expiry'] =", "not ip.startswith(\"127.\"): return ip, '' except Exception: pass return '', Exception(\"roster: No non", "not self.registry.Exists(): table, err = self.registry.Create() if err: return None, err # Create", "threading from urlparse import urlparse from datetime import datetime, timedelta from registry import", "service Expiry based on TTL and current time self.service.Expiry = int(time.mktime(datetime.now().timetuple())) if not", "{ 'Name': self.service.Name, 'Endpoint': self.service.Endpoint } if table.has_item(**item_info): item = table.get_item(**item_info) else: item", "table.get_item(**item_info) else: item = Item(table, self.service.__dict__) item['Expiry'] = self.service.Expiry item.save() # Query the", "import random import threading from urlparse import urlparse from datetime import datetime, timedelta", "'': self.region = os.getenv('AWS_REGION', '') # Default 
if self.region == '': self.region =", "port=self.endpoint_data.port, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, is_secure=False ) else: return connect_to_region(self.region, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key ) class CleanExit(object):", "stopHeartbeat=False, *args, **kwargs): self.Name = Name.get('S') if isinstance(Name, dict) else Name self.Endpoint =", "def GetHashKey(self): return self.name def GetRangeKey(self): return self.endpoint def GetConnection(self): # Environment var", "timestamp self.stopHeartbeat = int(stopHeartbeat.get('N')) if isinstance(stopHeartbeat, dict) else stopHeartbeat # Unregister the service", "exc_type, exc_value, exc_tb): if exc_type is KeyboardInterrupt: return True return exc_type is None", "t.daemon = True # causes the thread to terminate when the main process", "if self.service.stopHeartbeat: # return # Update service Expiry based on TTL and current", "= items.get('Count') if count == 0: return None, Exception('roster: No matching service found')", "Start heartbeat check t = threading.Thread(target=heartbeat_check, args=(self,)) t.daemon = True # causes the", "} } ) # Randomly select one of the available endpoints (in effect", "**kwargs): self.registry_name = registry_name self.region = '' endpoint = os.getenv('DYNAMODB_ENDPOINT', '') if endpoint", "def __init__(self, Name, Endpoint, Expiry=None, stopHeartbeat=False, *args, **kwargs): self.Name = Name.get('S') if isinstance(Name,", "try: for ip in socket.gethostbyname_ex(socket.gethostname())[2]: if not ip.startswith(\"127.\"): return ip, '' except Exception:", "in the registry def Register(self, name, endpoint): # Check whether the registry has", "not terminate: self.service.Expiry += TTL table = self.registry.Table() item_info = { 'Name': self.service.Name,", "local IP of the host the client is running on def get_local_ip(self): import", "- updates expiry def heartbeat_check(client): # with 
CleanExit(): while True: if client.service.stopHeartbeat: client.heartbeat(terminate=True)", "class CleanExit(object): def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): if exc_type", "endpoint != '': self.endpoint = endpoint self.endpoint_data = urlparse(self.endpoint) def SetRegion(self, region): \"\"\"", "= 1 # 1second TTL = 5 class Service(object): def __init__(self, Name, Endpoint,", "TTL table = self.registry.Table() item_info = { 'Name': self.service.Name, 'Endpoint': self.service.Endpoint } if", "socket try: for ip in socket.gethostbyname_ex(socket.gethostname())[2]: if not ip.startswith(\"127.\"): return ip, '' except", "class Client(object): def __init__(self, svc, config, registry): self.svc = svc self.config = config", "def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): if exc_type is KeyboardInterrupt:", "= threading.Thread(target=heartbeat_check, args=(self,)) t.daemon = True # causes the thread to terminate when", "Expiry based on TTL and current time self.service.Expiry = int(time.mktime(datetime.now().timetuple())) if not terminate:", "import connect_to_region from boto.dynamodb2.items import Item from boto.dynamodb2.exceptions import ItemNotFound HEARTBEAT_INTERVAL = 1", "previously created. If not create before registration. 
if not self.registry.Exists(): table, err =", "return self.name def GetRangeKey(self): return self.endpoint def GetConnection(self): # Environment var if self.region", "if table.has_item(**item_info): item = table.get_item(**item_info) else: item = Item(table, self.service.__dict__) item['Expiry'] = self.service.Expiry", "__init__(self, svc, config, registry): self.svc = svc self.config = config self.registry = registry", "\"us-west-2\" aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID', '') aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY', '') if self.endpoint: return DynamoDBConnection( host=self.endpoint_data.hostname, port=self.endpoint_data.port, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key,", "count = items.get('Count') if count == 0: return None, Exception('roster: No matching service", ") class CleanExit(object): def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): if", "exc_type is KeyboardInterrupt: return True return exc_type is None class Client(object): def __init__(self,", "least once self.heartbeat() # Start heartbeat check t = threading.Thread(target=heartbeat_check, args=(self,)) t.daemon =", "be found\") # Heartbeat function - updates expiry def heartbeat_check(client): # with CleanExit():", "current time self.service.Expiry = int(time.mktime(datetime.now().timetuple())) if not terminate: self.service.Expiry += TTL table =", "':ExpiryVal': { 'N': str(now) } } ) # Randomly select one of the", "aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY', '') if self.endpoint: return DynamoDBConnection( host=self.endpoint_data.hostname, port=self.endpoint_data.port, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, is_secure=False ) else:", "TTL = 5 class Service(object): def __init__(self, Name, Endpoint, Expiry=None, stopHeartbeat=False, *args, **kwargs):", "Service(Name=name, Endpoint=endpoint) # Ensure call heartbeat at least once self.heartbeat() # 
Start heartbeat", "1 # 1second TTL = 5 class Service(object): def __init__(self, Name, Endpoint, Expiry=None,", "if not ip.startswith(\"127.\"): return ip, '' except Exception: pass return '', Exception(\"roster: No", "# return # Update service Expiry based on TTL and current time self.service.Expiry", "self.stopHeartbeat = int(stopHeartbeat.get('N')) if isinstance(stopHeartbeat, dict) else stopHeartbeat # Unregister the service def", "the thread to terminate when the main process ends. t.start() return self.service, None", "Client(object): def __init__(self, svc, config, registry): self.svc = svc self.config = config self.registry", "the service in the registry def Register(self, name, endpoint): # Check whether the", "# Start heartbeat check t = threading.Thread(target=heartbeat_check, args=(self,)) t.daemon = True # causes", "GetConnection(self): # Environment var if self.region == '': self.region = os.getenv('AWS_REGION', '') #", "Service self.service = Service(Name=name, Endpoint=endpoint) # Ensure call heartbeat at least once self.heartbeat()", "\"\"\" Get registry table name \"\"\" return self.registry_name or 'roster' def GetHashKey(self): return", "running on def get_local_ip(self): import socket try: for ip in socket.gethostbyname_ex(socket.gethostname())[2]: if not", "class Service(object): def __init__(self, Name, Endpoint, Expiry=None, stopHeartbeat=False, *args, **kwargs): self.Name = Name.get('S')", "loopback local IP address could be found\") # Heartbeat function - updates expiry", "registry import NewRegistry from boto.dynamodb2.layer1 import DynamoDBConnection from boto.dynamodb2 import connect_to_region from boto.dynamodb2.items", "import Item from boto.dynamodb2.exceptions import ItemNotFound HEARTBEAT_INTERVAL = 1 # 1second TTL =", "address could be found\") # Heartbeat function - updates expiry def heartbeat_check(client): #", "exc_value, exc_tb): if exc_type is KeyboardInterrupt: return True return exc_type is None class", 
"isinstance(Endpoint, dict) else Endpoint self.Expiry = int(Expiry.get('N')) if isinstance(Expiry, dict) else Expiry #", "def get_local_ip(self): import socket try: for ip in socket.gethostbyname_ex(socket.gethostname())[2]: if not ip.startswith(\"127.\"): return", "from boto.dynamodb2.layer1 import DynamoDBConnection from boto.dynamodb2 import connect_to_region from boto.dynamodb2.items import Item from", "1second TTL = 5 class Service(object): def __init__(self, Name, Endpoint, Expiry=None, stopHeartbeat=False, *args,", "from datetime import datetime, timedelta from registry import NewRegistry from boto.dynamodb2.layer1 import DynamoDBConnection", "self.endpoint_data = urlparse(self.endpoint) def SetRegion(self, region): \"\"\" Set region \"\"\" self.region = region", "self.region == '': self.region = os.getenv('AWS_REGION', '') # Default if self.region == '':", "isinstance(Expiry, dict) else Expiry # unix timestamp self.stopHeartbeat = int(stopHeartbeat.get('N')) if isinstance(stopHeartbeat, dict)", "= int(time.mktime(datetime.now().timetuple())) if not terminate: self.service.Expiry += TTL table = self.registry.Table() item_info =", "'' except Exception: pass return '', Exception(\"roster: No non loopback local IP address", "between available endpoints) count = items.get('Count') if count == 0: return None, Exception('roster:", "*args, **kwargs): self.registry_name = registry_name self.region = '' endpoint = os.getenv('DYNAMODB_ENDPOINT', '') if", ":ExpiryVal AND #N = :NameVal', expression_attribute_names = { '#N': 'Name' }, expression_attribute_values =", "the registry for named service def Discover(self, name): now = int(time.mktime(datetime.now().timetuple())) items =", "= 5 class Service(object): def __init__(self, Name, Endpoint, Expiry=None, stopHeartbeat=False, *args, **kwargs): self.Name", "host the client is running on def get_local_ip(self): import socket try: for ip", "urlparse(self.endpoint) def SetRegion(self, region): \"\"\" Set region \"\"\" 
self.region = region def GetRegistryName(self):", "= self.registry.Table() item_info = { 'Name': self.service.Name, 'Endpoint': self.service.Endpoint } if table.has_item(**item_info): item", "exc_type is None class Client(object): def __init__(self, svc, config, registry): self.svc = svc", "self.registry.Create() if err: return None, err # Create Service self.service = Service(Name=name, Endpoint=endpoint)", "else Expiry # unix timestamp self.stopHeartbeat = int(stopHeartbeat.get('N')) if isinstance(stopHeartbeat, dict) else stopHeartbeat", "} ) # Randomly select one of the available endpoints (in effect load", "**kwargs): config = ClientConfig(*args, **kwargs) svc = config.GetConnection() registry = NewRegistry(svc, config.GetRegistryName()) return", "the client is running on def get_local_ip(self): import socket try: for ip in", "boto.dynamodb2.layer1 import DynamoDBConnection from boto.dynamodb2 import connect_to_region from boto.dynamodb2.items import Item from boto.dynamodb2.exceptions", "if self.region == '': self.region = os.getenv('AWS_REGION', '') # Default if self.region ==", "Register(self, name, endpoint): # Check whether the registry has been previously created. If", "before registration. 
if not self.registry.Exists(): table, err = self.registry.Create() if err: return None,", "service def Discover(self, name): now = int(time.mktime(datetime.now().timetuple())) items = self.svc.scan( self.registry.name, filter_expression =", "None, Exception('roster: No matching service found') else: return Service(**items['Items'][random.randint(0, count - 1)]), None", "Name, Endpoint, Expiry=None, stopHeartbeat=False, *args, **kwargs): self.Name = Name.get('S') if isinstance(Name, dict) else", "self.svc = svc self.config = config self.registry = registry @classmethod def new(cls, *args,", "# if self.service.stopHeartbeat: # return # Update service Expiry based on TTL and", "> :ExpiryVal AND #N = :NameVal', expression_attribute_names = { '#N': 'Name' }, expression_attribute_values", "config.GetRegistryName()) return Client(svc=svc, config=config, registry=registry) # Register the service in the registry def", "based on TTL and current time self.service.Expiry = int(time.mktime(datetime.now().timetuple())) if not terminate: self.service.Expiry", "None # Returns the non loopback local IP of the host the client", "causes the thread to terminate when the main process ends. t.start() return self.service,", "terminate: self.service.Expiry += TTL table = self.registry.Table() item_info = { 'Name': self.service.Name, 'Endpoint':", "'Name' }, expression_attribute_values = { ':NameVal': { 'S': name }, ':ExpiryVal': { 'N':", "create before registration. 
if not self.registry.Exists(): table, err = self.registry.Create() if err: return", "pass return '', Exception(\"roster: No non loopback local IP address could be found\")", "{ 'N': str(now) } } ) # Randomly select one of the available", "ItemNotFound HEARTBEAT_INTERVAL = 1 # 1second TTL = 5 class Service(object): def __init__(self,", "= Service(Name=name, Endpoint=endpoint) # Ensure call heartbeat at least once self.heartbeat() # Start", "return connect_to_region(self.region, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key ) class CleanExit(object): def __enter__(self): return self def __exit__(self,", "Item(table, self.service.__dict__) item['Expiry'] = self.service.Expiry item.save() # Query the registry for named service", "local IP address could be found\") # Heartbeat function - updates expiry def", "select one of the available endpoints (in effect load balancing between available endpoints)", "class ClientConfig(object): def __init__(self, registry_name='', *args, **kwargs): self.registry_name = registry_name self.region = ''", "str(now) } } ) # Randomly select one of the available endpoints (in", "return DynamoDBConnection( host=self.endpoint_data.hostname, port=self.endpoint_data.port, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, is_secure=False ) else: return connect_to_region(self.region, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key", "return '', Exception(\"roster: No non loopback local IP address could be found\") #", "t = threading.Thread(target=heartbeat_check, args=(self,)) t.daemon = True # causes the thread to terminate", "= ClientConfig(*args, **kwargs) svc = config.GetConnection() registry = NewRegistry(svc, config.GetRegistryName()) return Client(svc=svc, config=config,", "def Discover(self, name): now = int(time.mktime(datetime.now().timetuple())) items = self.svc.scan( self.registry.name, filter_expression = 'Expiry", "0: return None, 
Exception('roster: No matching service found') else: return Service(**items['Items'][random.randint(0, count -", "matching service found') else: return Service(**items['Items'][random.randint(0, count - 1)]), None # Returns the", "ends. t.start() return self.service, None # Heartbeat function - updates expiry def heartbeat(self,", "if isinstance(stopHeartbeat, dict) else stopHeartbeat # Unregister the service def Unregister(self): self.stopHeartbeat =", "}, expression_attribute_values = { ':NameVal': { 'S': name }, ':ExpiryVal': { 'N': str(now)", "':NameVal': { 'S': name }, ':ExpiryVal': { 'N': str(now) } } ) #", "'S': name }, ':ExpiryVal': { 'N': str(now) } } ) # Randomly select", ") else: return connect_to_region(self.region, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key ) class CleanExit(object): def __enter__(self): return self", "Exception(\"roster: No non loopback local IP address could be found\") # Heartbeat function", "exc_tb): if exc_type is KeyboardInterrupt: return True return exc_type is None class Client(object):", "of the host the client is running on def get_local_ip(self): import socket try:", "aws_secret_access_key=aws_secret_access_key, is_secure=False ) else: return connect_to_region(self.region, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key ) class CleanExit(object): def __enter__(self):", "Name self.Endpoint = Endpoint.get('S') if isinstance(Endpoint, dict) else Endpoint self.Expiry = int(Expiry.get('N')) if", "Register the service in the registry def Register(self, name, endpoint): # Check whether", "= NewRegistry(svc, config.GetRegistryName()) return Client(svc=svc, config=config, registry=registry) # Register the service in the", "unix timestamp self.stopHeartbeat = int(stopHeartbeat.get('N')) if isinstance(stopHeartbeat, dict) else stopHeartbeat # Unregister the", "return Client(svc=svc, config=config, registry=registry) # Register the service in the registry def 
Register(self,", "balancing between available endpoints) count = items.get('Count') if count == 0: return None,", "*args, **kwargs): self.Name = Name.get('S') if isinstance(Name, dict) else Name self.Endpoint = Endpoint.get('S')", "urlparse from datetime import datetime, timedelta from registry import NewRegistry from boto.dynamodb2.layer1 import", "'roster' def GetHashKey(self): return self.name def GetRangeKey(self): return self.endpoint def GetConnection(self): # Environment", "self.Endpoint = Endpoint.get('S') if isinstance(Endpoint, dict) else Endpoint self.Expiry = int(Expiry.get('N')) if isinstance(Expiry,", "once self.heartbeat() # Start heartbeat check t = threading.Thread(target=heartbeat_check, args=(self,)) t.daemon = True", "KeyboardInterrupt: return True return exc_type is None class Client(object): def __init__(self, svc, config,", "import urlparse from datetime import datetime, timedelta from registry import NewRegistry from boto.dynamodb2.layer1", "os import signal import random import threading from urlparse import urlparse from datetime", "True return exc_type is None class Client(object): def __init__(self, svc, config, registry): self.svc", "thread to terminate when the main process ends. t.start() return self.service, None #", "registry has been previously created. If not create before registration. 
if not self.registry.Exists():", "**kwargs) svc = config.GetConnection() registry = NewRegistry(svc, config.GetRegistryName()) return Client(svc=svc, config=config, registry=registry) #", "config=config, registry=registry) # Register the service in the registry def Register(self, name, endpoint):", "else Name self.Endpoint = Endpoint.get('S') if isinstance(Endpoint, dict) else Endpoint self.Expiry = int(Expiry.get('N'))", "= 'Expiry > :ExpiryVal AND #N = :NameVal', expression_attribute_names = { '#N': 'Name'", "5 class Service(object): def __init__(self, Name, Endpoint, Expiry=None, stopHeartbeat=False, *args, **kwargs): self.Name =", "timedelta from registry import NewRegistry from boto.dynamodb2.layer1 import DynamoDBConnection from boto.dynamodb2 import connect_to_region", "__init__(self, Name, Endpoint, Expiry=None, stopHeartbeat=False, *args, **kwargs): self.Name = Name.get('S') if isinstance(Name, dict)", "registry table name \"\"\" return self.registry_name or 'roster' def GetHashKey(self): return self.name def", "Unregister the service def Unregister(self): self.stopHeartbeat = True class ClientConfig(object): def __init__(self, registry_name='',", "def new(cls, *args, **kwargs): config = ClientConfig(*args, **kwargs) svc = config.GetConnection() registry =", "updates expiry def heartbeat_check(client): # with CleanExit(): while True: if client.service.stopHeartbeat: client.heartbeat(terminate=True) break", "name }, ':ExpiryVal': { 'N': str(now) } } ) # Randomly select one", "items.get('Count') if count == 0: return None, Exception('roster: No matching service found') else:", "table name \"\"\" return self.registry_name or 'roster' def GetHashKey(self): return self.name def GetRangeKey(self):", "GetRangeKey(self): return self.endpoint def GetConnection(self): # Environment var if self.region == '': self.region", "- updates expiry def heartbeat(self, terminate=False): # if self.service.stopHeartbeat: # return # Update", "for ip in 
socket.gethostbyname_ex(socket.gethostname())[2]: if not ip.startswith(\"127.\"): return ip, '' except Exception: pass", "or 'roster' def GetHashKey(self): return self.name def GetRangeKey(self): return self.endpoint def GetConnection(self): #", "'': self.region = \"us-west-2\" aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID', '') aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY', '') if self.endpoint: return DynamoDBConnection( host=self.endpoint_data.hostname,", "self.endpoint: return DynamoDBConnection( host=self.endpoint_data.hostname, port=self.endpoint_data.port, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, is_secure=False ) else: return connect_to_region(self.region, aws_access_key_id=aws_access_key_id,", "self.service = Service(Name=name, Endpoint=endpoint) # Ensure call heartbeat at least once self.heartbeat() #", "item = table.get_item(**item_info) else: item = Item(table, self.service.__dict__) item['Expiry'] = self.service.Expiry item.save() #", "count == 0: return None, Exception('roster: No matching service found') else: return Service(**items['Items'][random.randint(0,", "Unregister(self): self.stopHeartbeat = True class ClientConfig(object): def __init__(self, registry_name='', *args, **kwargs): self.registry_name =", "non loopback local IP address could be found\") # Heartbeat function - updates", "'' endpoint = os.getenv('DYNAMODB_ENDPOINT', '') if endpoint != '': self.endpoint = endpoint self.endpoint_data", "Ensure call heartbeat at least once self.heartbeat() # Start heartbeat check t =", "1)]), None # Returns the non loopback local IP of the host the", "heartbeat(self, terminate=False): # if self.service.stopHeartbeat: # return # Update service Expiry based on", "registry=registry) # Register the service in the registry def Register(self, name, endpoint): #", "item = Item(table, self.service.__dict__) item['Expiry'] = self.service.Expiry item.save() # Query the registry for", ":NameVal', 
expression_attribute_names = { '#N': 'Name' }, expression_attribute_values = { ':NameVal': { 'S':", "self.Name = Name.get('S') if isinstance(Name, dict) else Name self.Endpoint = Endpoint.get('S') if isinstance(Endpoint,", "aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key ) class CleanExit(object): def __enter__(self): return self def __exit__(self, exc_type, exc_value,", "= Endpoint.get('S') if isinstance(Endpoint, dict) else Endpoint self.Expiry = int(Expiry.get('N')) if isinstance(Expiry, dict)", "region def GetRegistryName(self): \"\"\" Get registry table name \"\"\" return self.registry_name or 'roster'", "def __init__(self, svc, config, registry): self.svc = svc self.config = config self.registry =", "\"\"\" return self.registry_name or 'roster' def GetHashKey(self): return self.name def GetRangeKey(self): return self.endpoint", "return exc_type is None class Client(object): def __init__(self, svc, config, registry): self.svc =", "Expiry=None, stopHeartbeat=False, *args, **kwargs): self.Name = Name.get('S') if isinstance(Name, dict) else Name self.Endpoint", "err # Create Service self.service = Service(Name=name, Endpoint=endpoint) # Ensure call heartbeat at", "= self.registry.Create() if err: return None, err # Create Service self.service = Service(Name=name,", "'') if endpoint != '': self.endpoint = endpoint self.endpoint_data = urlparse(self.endpoint) def SetRegion(self,", "the host the client is running on def get_local_ip(self): import socket try: for", "service def Unregister(self): self.stopHeartbeat = True class ClientConfig(object): def __init__(self, registry_name='', *args, **kwargs):", "isinstance(Name, dict) else Name self.Endpoint = Endpoint.get('S') if isinstance(Endpoint, dict) else Endpoint self.Expiry", "self.service.Name, 'Endpoint': self.service.Endpoint } if table.has_item(**item_info): item = table.get_item(**item_info) else: item = Item(table,", "# Register the service in the registry def Register(self, 
name, endpoint): # Check", "the service def Unregister(self): self.stopHeartbeat = True class ClientConfig(object): def __init__(self, registry_name='', *args,", "client is running on def get_local_ip(self): import socket try: for ip in socket.gethostbyname_ex(socket.gethostname())[2]:", "GetHashKey(self): return self.name def GetRangeKey(self): return self.endpoint def GetConnection(self): # Environment var if", "IP address could be found\") # Heartbeat function - updates expiry def heartbeat_check(client):", "#N = :NameVal', expression_attribute_names = { '#N': 'Name' }, expression_attribute_values = { ':NameVal':", "# Returns the non loopback local IP of the host the client is", "self.heartbeat() # Start heartbeat check t = threading.Thread(target=heartbeat_check, args=(self,)) t.daemon = True #", "config self.registry = registry @classmethod def new(cls, *args, **kwargs): config = ClientConfig(*args, **kwargs)", "Heartbeat function - updates expiry def heartbeat(self, terminate=False): # if self.service.stopHeartbeat: # return", "function - updates expiry def heartbeat_check(client): # with CleanExit(): while True: if client.service.stopHeartbeat:", "dict) else Endpoint self.Expiry = int(Expiry.get('N')) if isinstance(Expiry, dict) else Expiry # unix", "'', Exception(\"roster: No non loopback local IP address could be found\") # Heartbeat", "Name.get('S') if isinstance(Name, dict) else Name self.Endpoint = Endpoint.get('S') if isinstance(Endpoint, dict) else", "is_secure=False ) else: return connect_to_region(self.region, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key ) class CleanExit(object): def __enter__(self): return", "registry @classmethod def new(cls, *args, **kwargs): config = ClientConfig(*args, **kwargs) svc = config.GetConnection()", "has been previously created. If not create before registration. 
if not self.registry.Exists(): table,", "table.has_item(**item_info): item = table.get_item(**item_info) else: item = Item(table, self.service.__dict__) item['Expiry'] = self.service.Expiry item.save()", "self.region = \"us-west-2\" aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID', '') aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY', '') if self.endpoint: return DynamoDBConnection( host=self.endpoint_data.hostname, port=self.endpoint_data.port,", "whether the registry has been previously created. If not create before registration. if", "Exception('roster: No matching service found') else: return Service(**items['Items'][random.randint(0, count - 1)]), None #", "# Update service Expiry based on TTL and current time self.service.Expiry = int(time.mktime(datetime.now().timetuple()))", "'') # Default if self.region == '': self.region = \"us-west-2\" aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID', '') aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY',", "if self.endpoint: return DynamoDBConnection( host=self.endpoint_data.hostname, port=self.endpoint_data.port, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, is_secure=False ) else: return connect_to_region(self.region,", "Environment var if self.region == '': self.region = os.getenv('AWS_REGION', '') # Default if", "been previously created. If not create before registration. 
if not self.registry.Exists(): table, err", "endpoints (in effect load balancing between available endpoints) count = items.get('Count') if count", "= config.GetConnection() registry = NewRegistry(svc, config.GetRegistryName()) return Client(svc=svc, config=config, registry=registry) # Register the", "region): \"\"\" Set region \"\"\" self.region = region def GetRegistryName(self): \"\"\" Get registry", "Endpoint=endpoint) # Ensure call heartbeat at least once self.heartbeat() # Start heartbeat check", "import signal import random import threading from urlparse import urlparse from datetime import", "one of the available endpoints (in effect load balancing between available endpoints) count", "now = int(time.mktime(datetime.now().timetuple())) items = self.svc.scan( self.registry.name, filter_expression = 'Expiry > :ExpiryVal AND", "dict) else Expiry # unix timestamp self.stopHeartbeat = int(stopHeartbeat.get('N')) if isinstance(stopHeartbeat, dict) else", "isinstance(stopHeartbeat, dict) else stopHeartbeat # Unregister the service def Unregister(self): self.stopHeartbeat = True", "terminate when the main process ends. 
t.start() return self.service, None # Heartbeat function", "self.name def GetRangeKey(self): return self.endpoint def GetConnection(self): # Environment var if self.region ==", "updates expiry def heartbeat(self, terminate=False): # if self.service.stopHeartbeat: # return # Update service", "for named service def Discover(self, name): now = int(time.mktime(datetime.now().timetuple())) items = self.svc.scan( self.registry.name,", "self.registry.Table() item_info = { 'Name': self.service.Name, 'Endpoint': self.service.Endpoint } if table.has_item(**item_info): item =", "self.service.Expiry = int(time.mktime(datetime.now().timetuple())) if not terminate: self.service.Expiry += TTL table = self.registry.Table() item_info", "item_info = { 'Name': self.service.Name, 'Endpoint': self.service.Endpoint } if table.has_item(**item_info): item = table.get_item(**item_info)", "found') else: return Service(**items['Items'][random.randint(0, count - 1)]), None # Returns the non loopback", "self.service.__dict__) item['Expiry'] = self.service.Expiry item.save() # Query the registry for named service def", "terminate=False): # if self.service.stopHeartbeat: # return # Update service Expiry based on TTL", "to terminate when the main process ends. t.start() return self.service, None # Heartbeat", "# causes the thread to terminate when the main process ends. 
t.start() return", "socket.gethostbyname_ex(socket.gethostname())[2]: if not ip.startswith(\"127.\"): return ip, '' except Exception: pass return '', Exception(\"roster:", "__enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): if exc_type is KeyboardInterrupt: return", "named service def Discover(self, name): now = int(time.mktime(datetime.now().timetuple())) items = self.svc.scan( self.registry.name, filter_expression", "= int(Expiry.get('N')) if isinstance(Expiry, dict) else Expiry # unix timestamp self.stopHeartbeat = int(stopHeartbeat.get('N'))", "'') aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY', '') if self.endpoint: return DynamoDBConnection( host=self.endpoint_data.hostname, port=self.endpoint_data.port, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, is_secure=False )", "else stopHeartbeat # Unregister the service def Unregister(self): self.stopHeartbeat = True class ClientConfig(object):", "svc = config.GetConnection() registry = NewRegistry(svc, config.GetRegistryName()) return Client(svc=svc, config=config, registry=registry) # Register", "self.stopHeartbeat = True class ClientConfig(object): def __init__(self, registry_name='', *args, **kwargs): self.registry_name = registry_name", "Item from boto.dynamodb2.exceptions import ItemNotFound HEARTBEAT_INTERVAL = 1 # 1second TTL = 5", "expression_attribute_values = { ':NameVal': { 'S': name }, ':ExpiryVal': { 'N': str(now) }", "NewRegistry(svc, config.GetRegistryName()) return Client(svc=svc, config=config, registry=registry) # Register the service in the registry", "created. If not create before registration. 
if not self.registry.Exists(): table, err = self.registry.Create()", "available endpoints (in effect load balancing between available endpoints) count = items.get('Count') if", "ip in socket.gethostbyname_ex(socket.gethostname())[2]: if not ip.startswith(\"127.\"): return ip, '' except Exception: pass return", "endpoints) count = items.get('Count') if count == 0: return None, Exception('roster: No matching", "the main process ends. t.start() return self.service, None # Heartbeat function - updates", "the registry def Register(self, name, endpoint): # Check whether the registry has been", "could be found\") # Heartbeat function - updates expiry def heartbeat_check(client): # with", "True # causes the thread to terminate when the main process ends. t.start()", "expiry def heartbeat(self, terminate=False): # if self.service.stopHeartbeat: # return # Update service Expiry", "Update service Expiry based on TTL and current time self.service.Expiry = int(time.mktime(datetime.now().timetuple())) if", "'#N': 'Name' }, expression_attribute_values = { ':NameVal': { 'S': name }, ':ExpiryVal': {", "os.getenv('AWS_REGION', '') # Default if self.region == '': self.region = \"us-west-2\" aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID', '')", "filter_expression = 'Expiry > :ExpiryVal AND #N = :NameVal', expression_attribute_names = { '#N':", "= '' endpoint = os.getenv('DYNAMODB_ENDPOINT', '') if endpoint != '': self.endpoint = endpoint", "import threading from urlparse import urlparse from datetime import datetime, timedelta from registry", "config, registry): self.svc = svc self.config = config self.registry = registry @classmethod def", "datetime, timedelta from registry import NewRegistry from boto.dynamodb2.layer1 import DynamoDBConnection from boto.dynamodb2 import", "= urlparse(self.endpoint) def SetRegion(self, region): \"\"\" Set region \"\"\" self.region = region def", "from urlparse import urlparse from datetime import datetime, timedelta from registry import 
NewRegistry", "ip.startswith(\"127.\"): return ip, '' except Exception: pass return '', Exception(\"roster: No non loopback", "the available endpoints (in effect load balancing between available endpoints) count = items.get('Count')", "= :NameVal', expression_attribute_names = { '#N': 'Name' }, expression_attribute_values = { ':NameVal': {", "# Unregister the service def Unregister(self): self.stopHeartbeat = True class ClientConfig(object): def __init__(self,", "is KeyboardInterrupt: return True return exc_type is None class Client(object): def __init__(self, svc,", "item.save() # Query the registry for named service def Discover(self, name): now =", "aws_secret_access_key=aws_secret_access_key ) class CleanExit(object): def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb):", "if count == 0: return None, Exception('roster: No matching service found') else: return", "time self.service.Expiry = int(time.mktime(datetime.now().timetuple())) if not terminate: self.service.Expiry += TTL table = self.registry.Table()", "Default if self.region == '': self.region = \"us-west-2\" aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID', '') aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY', '') if", "items = self.svc.scan( self.registry.name, filter_expression = 'Expiry > :ExpiryVal AND #N = :NameVal',", "- 1)]), None # Returns the non loopback local IP of the host", "= os.getenv('AWS_REGION', '') # Default if self.region == '': self.region = \"us-west-2\" aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID',", "on def get_local_ip(self): import socket try: for ip in socket.gethostbyname_ex(socket.gethostname())[2]: if not ip.startswith(\"127.\"):", "int(time.mktime(datetime.now().timetuple())) if not terminate: self.service.Expiry += TTL table = self.registry.Table() item_info = {", "check t = threading.Thread(target=heartbeat_check, args=(self,)) t.daemon = True # causes the thread to", "import datetime, timedelta from registry import NewRegistry from 
boto.dynamodb2.layer1 import DynamoDBConnection from boto.dynamodb2", "# Environment var if self.region == '': self.region = os.getenv('AWS_REGION', '') # Default", "Set region \"\"\" self.region = region def GetRegistryName(self): \"\"\" Get registry table name", "self.registry.Exists(): table, err = self.registry.Create() if err: return None, err # Create Service", "# Randomly select one of the available endpoints (in effect load balancing between", "= endpoint self.endpoint_data = urlparse(self.endpoint) def SetRegion(self, region): \"\"\" Set region \"\"\" self.region", "= int(stopHeartbeat.get('N')) if isinstance(stopHeartbeat, dict) else stopHeartbeat # Unregister the service def Unregister(self):", "process ends. t.start() return self.service, None # Heartbeat function - updates expiry def", "No matching service found') else: return Service(**items['Items'][random.randint(0, count - 1)]), None # Returns", "service found') else: return Service(**items['Items'][random.randint(0, count - 1)]), None # Returns the non", "expression_attribute_names = { '#N': 'Name' }, expression_attribute_values = { ':NameVal': { 'S': name", "var if self.region == '': self.region = os.getenv('AWS_REGION', '') # Default if self.region", "{ 'S': name }, ':ExpiryVal': { 'N': str(now) } } ) # Randomly", "aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID', '') aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY', '') if self.endpoint: return DynamoDBConnection( host=self.endpoint_data.hostname, port=self.endpoint_data.port, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, is_secure=False", "'') if self.endpoint: return DynamoDBConnection( host=self.endpoint_data.hostname, port=self.endpoint_data.port, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, is_secure=False ) else: return", "= { '#N': 'Name' }, expression_attribute_values = { ':NameVal': { 'S': name },", "registry): self.svc = svc self.config = config 
self.registry = registry @classmethod def new(cls,", "config.GetConnection() registry = NewRegistry(svc, config.GetRegistryName()) return Client(svc=svc, config=config, registry=registry) # Register the service", "call heartbeat at least once self.heartbeat() # Start heartbeat check t = threading.Thread(target=heartbeat_check,", "sys import os import signal import random import threading from urlparse import urlparse", "args=(self,)) t.daemon = True # causes the thread to terminate when the main", "def heartbeat(self, terminate=False): # if self.service.stopHeartbeat: # return # Update service Expiry based", "if isinstance(Expiry, dict) else Expiry # unix timestamp self.stopHeartbeat = int(stopHeartbeat.get('N')) if isinstance(stopHeartbeat,", "at least once self.heartbeat() # Start heartbeat check t = threading.Thread(target=heartbeat_check, args=(self,)) t.daemon", "Endpoint self.Expiry = int(Expiry.get('N')) if isinstance(Expiry, dict) else Expiry # unix timestamp self.stopHeartbeat", "self.Expiry = int(Expiry.get('N')) if isinstance(Expiry, dict) else Expiry # unix timestamp self.stopHeartbeat =", "def GetConnection(self): # Environment var if self.region == '': self.region = os.getenv('AWS_REGION', '')", "# Create Service self.service = Service(Name=name, Endpoint=endpoint) # Ensure call heartbeat at least", "is None class Client(object): def __init__(self, svc, config, registry): self.svc = svc self.config", "config = ClientConfig(*args, **kwargs) svc = config.GetConnection() registry = NewRegistry(svc, config.GetRegistryName()) return Client(svc=svc,", "endpoint self.endpoint_data = urlparse(self.endpoint) def SetRegion(self, region): \"\"\" Set region \"\"\" self.region =", "import time import sys import os import signal import random import threading from", "__init__(self, registry_name='', *args, **kwargs): self.registry_name = registry_name self.region = '' endpoint = os.getenv('DYNAMODB_ENDPOINT',", "registry = NewRegistry(svc, config.GetRegistryName()) 
return Client(svc=svc, config=config, registry=registry) # Register the service in", "AND #N = :NameVal', expression_attribute_names = { '#N': 'Name' }, expression_attribute_values = {", "table, err = self.registry.Create() if err: return None, err # Create Service self.service", "= self.service.Expiry item.save() # Query the registry for named service def Discover(self, name):", "the non loopback local IP of the host the client is running on", "No non loopback local IP address could be found\") # Heartbeat function -", "return self def __exit__(self, exc_type, exc_value, exc_tb): if exc_type is KeyboardInterrupt: return True", "Service(**items['Items'][random.randint(0, count - 1)]), None # Returns the non loopback local IP of", "registration. if not self.registry.Exists(): table, err = self.registry.Create() if err: return None, err", "self.service.Expiry item.save() # Query the registry for named service def Discover(self, name): now", "ip, '' except Exception: pass return '', Exception(\"roster: No non loopback local IP", "'Name': self.service.Name, 'Endpoint': self.service.Endpoint } if table.has_item(**item_info): item = table.get_item(**item_info) else: item =", "# 1second TTL = 5 class Service(object): def __init__(self, Name, Endpoint, Expiry=None, stopHeartbeat=False,", "Create Service self.service = Service(Name=name, Endpoint=endpoint) # Ensure call heartbeat at least once", "err: return None, err # Create Service self.service = Service(Name=name, Endpoint=endpoint) # Ensure", "Discover(self, name): now = int(time.mktime(datetime.now().timetuple())) items = self.svc.scan( self.registry.name, filter_expression = 'Expiry >", "import ItemNotFound HEARTBEAT_INTERVAL = 1 # 1second TTL = 5 class Service(object): def", "= { ':NameVal': { 'S': name }, ':ExpiryVal': { 'N': str(now) } }", "host=self.endpoint_data.hostname, port=self.endpoint_data.port, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, is_secure=False ) else: return 
connect_to_region(self.region, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key ) class", ") # Randomly select one of the available endpoints (in effect load balancing", "= registry @classmethod def new(cls, *args, **kwargs): config = ClientConfig(*args, **kwargs) svc =", "# Heartbeat function - updates expiry def heartbeat(self, terminate=False): # if self.service.stopHeartbeat: #", "self.service.Expiry += TTL table = self.registry.Table() item_info = { 'Name': self.service.Name, 'Endpoint': self.service.Endpoint", "time import sys import os import signal import random import threading from urlparse", "stopHeartbeat # Unregister the service def Unregister(self): self.stopHeartbeat = True class ClientConfig(object): def", "HEARTBEAT_INTERVAL = 1 # 1second TTL = 5 class Service(object): def __init__(self, Name,", "self.registry = registry @classmethod def new(cls, *args, **kwargs): config = ClientConfig(*args, **kwargs) svc", "= os.getenv('DYNAMODB_ENDPOINT', '') if endpoint != '': self.endpoint = endpoint self.endpoint_data = urlparse(self.endpoint)", "registry def Register(self, name, endpoint): # Check whether the registry has been previously", "from registry import NewRegistry from boto.dynamodb2.layer1 import DynamoDBConnection from boto.dynamodb2 import connect_to_region from", "boto.dynamodb2.items import Item from boto.dynamodb2.exceptions import ItemNotFound HEARTBEAT_INTERVAL = 1 # 1second TTL", "# Default if self.region == '': self.region = \"us-west-2\" aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID', '') aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY', '')", "self.registry.name, filter_expression = 'Expiry > :ExpiryVal AND #N = :NameVal', expression_attribute_names = {", "Expiry # unix timestamp self.stopHeartbeat = int(stopHeartbeat.get('N')) if isinstance(stopHeartbeat, dict) else stopHeartbeat #", "Check whether the registry has been previously created. 
If not create before registration.", "{ '#N': 'Name' }, expression_attribute_values = { ':NameVal': { 'S': name }, ':ExpiryVal':", "return self.service, None # Heartbeat function - updates expiry def heartbeat(self, terminate=False): #", "urlparse import urlparse from datetime import datetime, timedelta from registry import NewRegistry from", "ClientConfig(*args, **kwargs) svc = config.GetConnection() registry = NewRegistry(svc, config.GetRegistryName()) return Client(svc=svc, config=config, registry=registry)", "Service(object): def __init__(self, Name, Endpoint, Expiry=None, stopHeartbeat=False, *args, **kwargs): self.Name = Name.get('S') if", "self.region = '' endpoint = os.getenv('DYNAMODB_ENDPOINT', '') if endpoint != '': self.endpoint =", "self.endpoint def GetConnection(self): # Environment var if self.region == '': self.region = os.getenv('AWS_REGION',", "def GetRangeKey(self): return self.endpoint def GetConnection(self): # Environment var if self.region == '':", "'Endpoint': self.service.Endpoint } if table.has_item(**item_info): item = table.get_item(**item_info) else: item = Item(table, self.service.__dict__)", "DynamoDBConnection( host=self.endpoint_data.hostname, port=self.endpoint_data.port, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, is_secure=False ) else: return connect_to_region(self.region, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key )", "= { 'Name': self.service.Name, 'Endpoint': self.service.Endpoint } if table.has_item(**item_info): item = table.get_item(**item_info) else:", "effect load balancing between available endpoints) count = items.get('Count') if count == 0:", "registry for named service def Discover(self, name): now = int(time.mktime(datetime.now().timetuple())) items = self.svc.scan(", "{ ':NameVal': { 'S': name }, ':ExpiryVal': { 'N': str(now) } } )", "def __exit__(self, exc_type, exc_value, exc_tb): if exc_type is KeyboardInterrupt: return True return 
exc_type", "service in the registry def Register(self, name, endpoint): # Check whether the registry", "return self.endpoint def GetConnection(self): # Environment var if self.region == '': self.region =", "def Register(self, name, endpoint): # Check whether the registry has been previously created.", "'': self.endpoint = endpoint self.endpoint_data = urlparse(self.endpoint) def SetRegion(self, region): \"\"\" Set region", "# Ensure call heartbeat at least once self.heartbeat() # Start heartbeat check t", "None, err # Create Service self.service = Service(Name=name, Endpoint=endpoint) # Ensure call heartbeat", "main process ends. t.start() return self.service, None # Heartbeat function - updates expiry", "IP of the host the client is running on def get_local_ip(self): import socket", "ClientConfig(object): def __init__(self, registry_name='', *args, **kwargs): self.registry_name = registry_name self.region = '' endpoint", "new(cls, *args, **kwargs): config = ClientConfig(*args, **kwargs) svc = config.GetConnection() registry = NewRegistry(svc,", "self.registry_name or 'roster' def GetHashKey(self): return self.name def GetRangeKey(self): return self.endpoint def GetConnection(self):", "if not terminate: self.service.Expiry += TTL table = self.registry.Table() item_info = { 'Name':", "}, ':ExpiryVal': { 'N': str(now) } } ) # Randomly select one of", "return # Update service Expiry based on TTL and current time self.service.Expiry =", "self def __exit__(self, exc_type, exc_value, exc_tb): if exc_type is KeyboardInterrupt: return True return", "# Query the registry for named service def Discover(self, name): now = int(time.mktime(datetime.now().timetuple()))", "= Item(table, self.service.__dict__) item['Expiry'] = self.service.Expiry item.save() # Query the registry for named", "SetRegion(self, region): \"\"\" Set region \"\"\" self.region = region def GetRegistryName(self): \"\"\" Get", "svc self.config = config self.registry = registry @classmethod def new(cls, 
*args, **kwargs): config", "import socket try: for ip in socket.gethostbyname_ex(socket.gethostname())[2]: if not ip.startswith(\"127.\"): return ip, ''", "table = self.registry.Table() item_info = { 'Name': self.service.Name, 'Endpoint': self.service.Endpoint } if table.has_item(**item_info):", "Randomly select one of the available endpoints (in effect load balancing between available", "found\") # Heartbeat function - updates expiry def heartbeat_check(client): # with CleanExit(): while", "connect_to_region(self.region, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key ) class CleanExit(object): def __enter__(self): return self def __exit__(self, exc_type,", "__exit__(self, exc_type, exc_value, exc_tb): if exc_type is KeyboardInterrupt: return True return exc_type is", "= region def GetRegistryName(self): \"\"\" Get registry table name \"\"\" return self.registry_name or", "dict) else stopHeartbeat # Unregister the service def Unregister(self): self.stopHeartbeat = True class", "**kwargs): self.Name = Name.get('S') if isinstance(Name, dict) else Name self.Endpoint = Endpoint.get('S') if", "import sys import os import signal import random import threading from urlparse import", "get_local_ip(self): import socket try: for ip in socket.gethostbyname_ex(socket.gethostname())[2]: if not ip.startswith(\"127.\"): return ip,", "CleanExit(object): def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_tb): if exc_type is", "heartbeat at least once self.heartbeat() # Start heartbeat check t = threading.Thread(target=heartbeat_check, args=(self,))", "= registry_name self.region = '' endpoint = os.getenv('DYNAMODB_ENDPOINT', '') if endpoint != '':", "int(Expiry.get('N')) if isinstance(Expiry, dict) else Expiry # unix timestamp self.stopHeartbeat = int(stopHeartbeat.get('N')) if", "def heartbeat_check(client): # with CleanExit(): while True: if client.service.stopHeartbeat: client.heartbeat(terminate=True) break 
time.sleep(HEARTBEAT_INTERVAL) client.heartbeat()", "function - updates expiry def heartbeat(self, terminate=False): # if self.service.stopHeartbeat: # return #", "except Exception: pass return '', Exception(\"roster: No non loopback local IP address could", "@classmethod def new(cls, *args, **kwargs): config = ClientConfig(*args, **kwargs) svc = config.GetConnection() registry", "== '': self.region = \"us-west-2\" aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID', '') aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY', '') if self.endpoint: return DynamoDBConnection(", "= self.svc.scan( self.registry.name, filter_expression = 'Expiry > :ExpiryVal AND #N = :NameVal', expression_attribute_names", "else: return Service(**items['Items'][random.randint(0, count - 1)]), None # Returns the non loopback local", "name \"\"\" return self.registry_name or 'roster' def GetHashKey(self): return self.name def GetRangeKey(self): return", "non loopback local IP of the host the client is running on def", "return ip, '' except Exception: pass return '', Exception(\"roster: No non loopback local", "self.region == '': self.region = \"us-west-2\" aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID', '') aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY', '') if self.endpoint: return", "self.registry_name = registry_name self.region = '' endpoint = os.getenv('DYNAMODB_ENDPOINT', '') if endpoint !=", "None # Heartbeat function - updates expiry def heartbeat(self, terminate=False): # if self.service.stopHeartbeat:", "loopback local IP of the host the client is running on def get_local_ip(self):", "If not create before registration. 
if not self.registry.Exists(): table, err = self.registry.Create() if", "import NewRegistry from boto.dynamodb2.layer1 import DynamoDBConnection from boto.dynamodb2 import connect_to_region from boto.dynamodb2.items import", "import DynamoDBConnection from boto.dynamodb2 import connect_to_region from boto.dynamodb2.items import Item from boto.dynamodb2.exceptions import", "from boto.dynamodb2.exceptions import ItemNotFound HEARTBEAT_INTERVAL = 1 # 1second TTL = 5 class", "# Check whether the registry has been previously created. If not create before", "Query the registry for named service def Discover(self, name): now = int(time.mktime(datetime.now().timetuple())) items", "name): now = int(time.mktime(datetime.now().timetuple())) items = self.svc.scan( self.registry.name, filter_expression = 'Expiry > :ExpiryVal", "self.region = os.getenv('AWS_REGION', '') # Default if self.region == '': self.region = \"us-west-2\"", "= True class ClientConfig(object): def __init__(self, registry_name='', *args, **kwargs): self.registry_name = registry_name self.region" ]
[ "NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version", "admin_service.AdminCommunicatingStub(channel) client = admin_msg.Client() client.client_name = self.client_name messages = stub.Retrieve(client) for i in", "root_cert: root certificate ssl_cert: SSL certificate private_key: private key server_args: server args secure:", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "rank=0, ): \"\"\"To init the AdminMessageSender. Args: client_name: client name root_cert: root certificate", "channel \"\"\" if self.secure: with open(self.root_cert, \"rb\") as f: trusted_certs = f.read() with", "\"rb\") as f: private_key = f.read() with open(self.ssl_cert, \"rb\") as f: certificate_chain =", "2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License,", "taskname in tuple(self.servers): self._send_client_reply(message, taskname) def _send_client_reply(self, message, taskname): try: with self._set_up_channel(self.servers[taskname]) as", "server. Args: channel_dict: grpc channel parameters Returns: an initialised grpc channel \"\"\" if", "= ThreadPool(len(self.servers)) def send_reply(self, message: Message): \"\"\"Call to send the request message. Args:", "\"\"\" if self.secure: with open(self.root_cert, \"rb\") as f: trusted_certs = f.read() with open(self.private_key,", "threading from multiprocessing.dummy import Pool as ThreadPool import grpc import nvflare.private.fed.protos.admin_pb2 as admin_msg", "certificate private_key: private key server_args: server args secure: True/False is_multi_gpu: True/False rank: local", "self.pool = ThreadPool(len(self.servers)) def send_reply(self, message: Message): \"\"\"Call to send the request message.", "secure=False, is_multi_gpu=False, rank=0, ): \"\"\"To init the AdminMessageSender. 
Args: client_name: client name root_cert:", "send_result(self, message: Message): \"\"\"Send the processor results to server. Args: message: message \"\"\"", "initialised grpc channel \"\"\" if self.secure: with open(self.root_cert, \"rb\") as f: trusted_certs =", "as admin_service from nvflare.private.admin_defs import Message from nvflare.private.fed.utils.messageproto import message_to_proto, proto_to_message from .admin", "def _send_client_reply(self, message, taskname): try: with self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel) reply", "\"\"\"Send the processor results to server. Args: message: message \"\"\" if self.rank ==", "this file except in compliance with the License. # You may obtain a", "= ssl_cert self.private_key = private_key self.secure = secure self.servers = server_args self.multi_gpu =", "the request message. Args: message: request message \"\"\" if self.rank == 0: #", "for taskname in tuple(self.servers): try: with self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel) reply", "certificate_chain = f.read() call_credentials = grpc.metadata_call_credentials( lambda context, callback: callback(((\"x-custom-token\", self.client_name),), None) )", "limitations under the License. \"\"\"This is the FLAdmin Client to send the request", "messages = stub.Retrieve(client) for i in messages.message: message_list.append(proto_to_message(i)) except Exception as e: messages", "server. 
Args: message: message \"\"\" if self.rank == 0: for taskname in tuple(self.servers):", "reply = admin_msg.Reply() reply.client_name = self.client_name reply.message.CopyFrom(message_to_proto(message)) stub.SendResult(reply) except BaseException: pass def _set_up_channel(self,", "= admin_service.AdminCommunicatingStub(channel) reply = admin_msg.Reply() reply.client_name = self.client_name reply.message.CopyFrom(message_to_proto(message)) # reply.message = message_to_proto(message)", "= grpc.composite_channel_credentials(credentials, call_credentials) channel = grpc.secure_channel(**channel_dict, credentials=composite_credentials) else: channel = grpc.insecure_channel(**channel_dict) return channel", "messages.extend(item) return messages def _retrieve_client_requests(self, taskname): try: message_list = [] with self._set_up_channel(self.servers[taskname]) as", "ANY KIND, either express or implied. # See the License for the specific", "server_args: server args secure: True/False is_multi_gpu: True/False rank: local process rank \"\"\" self.client_name", "pass def _set_up_channel(self, channel_dict): \"\"\"Connect client to the server. 
Args: channel_dict: grpc channel", "self, client_name, root_cert=None, ssl_cert=None, private_key=None, server_args=None, secure=False, is_multi_gpu=False, rank=0, ): \"\"\"To init the", "try: message_list = [] with self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel) client =", "root_cert self.ssl_cert = ssl_cert self.private_key = private_key self.secure = secure self.servers = server_args", "multiprocessing.dummy import Pool as ThreadPool import grpc import nvflare.private.fed.protos.admin_pb2 as admin_msg import nvflare.private.fed.protos.admin_pb2_grpc", "admin_service.AdminCommunicatingStub(channel) reply = admin_msg.Reply() reply.client_name = self.client_name reply.message.CopyFrom(message_to_proto(message)) # reply.message = message_to_proto(message) stub.SendReply(reply)", "[] with self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel) client = admin_msg.Client() client.client_name =", "is_multi_gpu: True/False rank: local process rank \"\"\" self.client_name = client_name self.root_cert = root_cert", "stub.SendResult(reply) except BaseException: pass def _set_up_channel(self, channel_dict): \"\"\"Connect client to the server. Args:", "from multiprocessing.dummy import Pool as ThreadPool import grpc import nvflare.private.fed.protos.admin_pb2 as admin_msg import", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "BaseException: pass def retrieve_requests(self) -> [Message]: \"\"\"Send the message to retrieve pending requests", "under the License. \"\"\"This is the FLAdmin Client to send the request message", "SSL certificate private_key: private key server_args: server args secure: True/False is_multi_gpu: True/False rank:", "to the admin server.\"\"\" import threading from multiprocessing.dummy import Pool as ThreadPool import", "messages. 
\"\"\" messages = [] if self.rank == 0: items = self.pool.map(self._retrieve_client_requests, tuple(self.servers))", "reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "_send_client_reply(self, message, taskname): try: with self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel) reply =", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "retrieve pending requests from the Server. Returns: list of messages. \"\"\" messages =", "OF ANY KIND, either express or implied. # See the License for the", "self.rank == 0: items = self.pool.map(self._retrieve_client_requests, tuple(self.servers)) for item in items: messages.extend(item) return", "items: messages.extend(item) return messages def _retrieve_client_requests(self, taskname): try: message_list = [] with self._set_up_channel(self.servers[taskname])", "def retrieve_requests(self) -> [Message]: \"\"\"Send the message to retrieve pending requests from the", "the admin server.\"\"\" def __init__( self, client_name, root_cert=None, ssl_cert=None, private_key=None, server_args=None, secure=False, is_multi_gpu=False,", "channel: stub = admin_service.AdminCommunicatingStub(channel) reply = admin_msg.Reply() reply.client_name = self.client_name reply.message.CopyFrom(message_to_proto(message)) stub.SendResult(reply) except", "for the specific language governing permissions and # limitations under the License. 
\"\"\"This", "self.client_name reply.message.CopyFrom(message_to_proto(message)) stub.SendResult(reply) except BaseException: pass def _set_up_channel(self, channel_dict): \"\"\"Connect client to the", "as f: private_key = f.read() with open(self.ssl_cert, \"rb\") as f: certificate_chain = f.read()", "self.send_client_reply(message) for taskname in tuple(self.servers): self._send_client_reply(message, taskname) def _send_client_reply(self, message, taskname): try: with", "import Pool as ThreadPool import grpc import nvflare.private.fed.protos.admin_pb2 as admin_msg import nvflare.private.fed.protos.admin_pb2_grpc as", "True/False rank: local process rank \"\"\" self.client_name = client_name self.root_cert = root_cert self.ssl_cert", "import message_to_proto, proto_to_message from .admin import Sender lock = threading.Lock() class AdminMessageSender(Sender): \"\"\"AdminMessageSender", "grpc channel parameters Returns: an initialised grpc channel \"\"\" if self.secure: with open(self.root_cert,", "Message): \"\"\"Send the processor results to server. Args: message: message \"\"\" if self.rank", "taskname): try: with self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel) reply = admin_msg.Reply() reply.client_name", "self.client_name = client_name self.root_cert = root_cert self.ssl_cert = ssl_cert self.private_key = private_key self.secure", "stub = admin_service.AdminCommunicatingStub(channel) reply = admin_msg.Reply() reply.client_name = self.client_name reply.message.CopyFrom(message_to_proto(message)) stub.SendResult(reply) except BaseException:", "self.client_name messages = stub.Retrieve(client) for i in messages.message: message_list.append(proto_to_message(i)) except Exception as e:", "the AdminMessageSender. 
Args: client_name: client name root_cert: root certificate ssl_cert: SSL certificate private_key:", "message_to_proto, proto_to_message from .admin import Sender lock = threading.Lock() class AdminMessageSender(Sender): \"\"\"AdminMessageSender to", "send the request message to the admin server.\"\"\" def __init__( self, client_name, root_cert=None,", "messages def _retrieve_client_requests(self, taskname): try: message_list = [] with self._set_up_channel(self.servers[taskname]) as channel: stub", "for taskname in tuple(self.servers): self._send_client_reply(message, taskname) def _send_client_reply(self, message, taskname): try: with self._set_up_channel(self.servers[taskname])", "stub.Retrieve(client) for i in messages.message: message_list.append(proto_to_message(i)) except Exception as e: messages = None", "retrieve_requests(self) -> [Message]: \"\"\"Send the message to retrieve pending requests from the Server.", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "admin_msg.Client() client.client_name = self.client_name messages = stub.Retrieve(client) for i in messages.message: message_list.append(proto_to_message(i)) except", "\"rb\") as f: certificate_chain = f.read() call_credentials = grpc.metadata_call_credentials( lambda context, callback: callback(((\"x-custom-token\",", "= is_multi_gpu self.rank = rank self.pool = ThreadPool(len(self.servers)) def send_reply(self, message: Message): \"\"\"Call", "Args: channel_dict: grpc channel parameters Returns: an initialised grpc channel \"\"\" if self.secure:", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "= grpc.metadata_call_credentials( lambda context, callback: callback(((\"x-custom-token\", self.client_name),), None) ) credentials = grpc.ssl_channel_credentials( certificate_chain=certificate_chain,", "message to retrieve pending requests from the Server. Returns: list of messages. 
\"\"\"", "root certificate ssl_cert: SSL certificate private_key: private key server_args: server args secure: True/False", "taskname) def _send_client_reply(self, message, taskname): try: with self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel)", "== 0: items = self.pool.map(self._retrieve_client_requests, tuple(self.servers)) for item in items: messages.extend(item) return messages", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "# self.send_client_reply(message) for taskname in tuple(self.servers): self._send_client_reply(message, taskname) def _send_client_reply(self, message, taskname): try:", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "f.read() with open(self.private_key, \"rb\") as f: private_key = f.read() with open(self.ssl_cert, \"rb\") as", "def send_result(self, message: Message): \"\"\"Send the processor results to server. Args: message: message", "import Message from nvflare.private.fed.utils.messageproto import message_to_proto, proto_to_message from .admin import Sender lock =", "(c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "None) ) credentials = grpc.ssl_channel_credentials( certificate_chain=certificate_chain, private_key=private_key, root_certificates=trusted_certs ) composite_credentials = grpc.composite_channel_credentials(credentials, call_credentials)", "to the server. 
Args: channel_dict: grpc channel parameters Returns: an initialised grpc channel", "self.rank == 0: for taskname in tuple(self.servers): try: with self._set_up_channel(self.servers[taskname]) as channel: stub", "required by applicable law or agreed to in writing, software # distributed under", "self.pool.map(self._retrieve_client_requests, tuple(self.servers)) for item in items: messages.extend(item) return messages def _retrieve_client_requests(self, taskname): try:", "client.client_name = self.client_name messages = stub.Retrieve(client) for i in messages.message: message_list.append(proto_to_message(i)) except Exception", "applicable law or agreed to in writing, software # distributed under the License", "the Server. Returns: list of messages. \"\"\" messages = [] if self.rank ==", ") credentials = grpc.ssl_channel_credentials( certificate_chain=certificate_chain, private_key=private_key, root_certificates=trusted_certs ) composite_credentials = grpc.composite_channel_credentials(credentials, call_credentials) channel", "__init__( self, client_name, root_cert=None, ssl_cert=None, private_key=None, server_args=None, secure=False, is_multi_gpu=False, rank=0, ): \"\"\"To init", "f.read() with open(self.ssl_cert, \"rb\") as f: certificate_chain = f.read() call_credentials = grpc.metadata_call_credentials( lambda", "or agreed to in writing, software # distributed under the License is distributed", "if self.rank == 0: for taskname in tuple(self.servers): try: with self._set_up_channel(self.servers[taskname]) as channel:", "return message_list def send_result(self, message: Message): \"\"\"Send the processor results to server. 
Args:", "threading.Lock() class AdminMessageSender(Sender): \"\"\"AdminMessageSender to send the request message to the admin server.\"\"\"", "as f: certificate_chain = f.read() call_credentials = grpc.metadata_call_credentials( lambda context, callback: callback(((\"x-custom-token\", self.client_name),),", "_retrieve_client_requests(self, taskname): try: message_list = [] with self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel)", "as f: trusted_certs = f.read() with open(self.private_key, \"rb\") as f: private_key = f.read()", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "All rights reserved. # # Licensed under the Apache License, Version 2.0 (the", "== 0: # self.send_client_reply(message) for taskname in tuple(self.servers): self._send_client_reply(message, taskname) def _send_client_reply(self, message,", "lock = threading.Lock() class AdminMessageSender(Sender): \"\"\"AdminMessageSender to send the request message to the", "\"rb\") as f: trusted_certs = f.read() with open(self.private_key, \"rb\") as f: private_key =", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "credentials = grpc.ssl_channel_credentials( certificate_chain=certificate_chain, private_key=private_key, root_certificates=trusted_certs ) composite_credentials = grpc.composite_channel_credentials(credentials, call_credentials) channel =", "writing, software # distributed under the License is distributed on an \"AS IS\"", "= server_args self.multi_gpu = is_multi_gpu self.rank = rank self.pool = ThreadPool(len(self.servers)) def send_reply(self,", "the request message to the admin server.\"\"\" import threading from multiprocessing.dummy import Pool", "grpc channel \"\"\" if self.secure: with open(self.root_cert, \"rb\") as f: trusted_certs = f.read()", "the processor results to server. 
Args: message: message \"\"\" if self.rank == 0:", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "composite_credentials = grpc.composite_channel_credentials(credentials, call_credentials) channel = grpc.secure_channel(**channel_dict, credentials=composite_credentials) else: channel = grpc.insecure_channel(**channel_dict) return", "License. # You may obtain a copy of the License at # #", "message: request message \"\"\" if self.rank == 0: # self.send_client_reply(message) for taskname in", "private_key = f.read() with open(self.ssl_cert, \"rb\") as f: certificate_chain = f.read() call_credentials =", "Client to send the request message to the admin server.\"\"\" import threading from", "private_key: private key server_args: server args secure: True/False is_multi_gpu: True/False rank: local process", "compliance with the License. # You may obtain a copy of the License", "message \"\"\" if self.rank == 0: # self.send_client_reply(message) for taskname in tuple(self.servers): self._send_client_reply(message,", "f: trusted_certs = f.read() with open(self.private_key, \"rb\") as f: private_key = f.read() with", "with open(self.root_cert, \"rb\") as f: trusted_certs = f.read() with open(self.private_key, \"rb\") as f:", "specific language governing permissions and # limitations under the License. \"\"\"This is the", "server_args=None, secure=False, is_multi_gpu=False, rank=0, ): \"\"\"To init the AdminMessageSender. 
Args: client_name: client name", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "\"\"\" if self.rank == 0: # self.send_client_reply(message) for taskname in tuple(self.servers): self._send_client_reply(message, taskname)", "= self.client_name reply.message.CopyFrom(message_to_proto(message)) # reply.message = message_to_proto(message) stub.SendReply(reply) except BaseException: pass def retrieve_requests(self)", "the message to retrieve pending requests from the Server. Returns: list of messages.", "admin server.\"\"\" import threading from multiprocessing.dummy import Pool as ThreadPool import grpc import", "message_list def send_result(self, message: Message): \"\"\"Send the processor results to server. Args: message:", "= threading.Lock() class AdminMessageSender(Sender): \"\"\"AdminMessageSender to send the request message to the admin", "self.rank == 0: # self.send_client_reply(message) for taskname in tuple(self.servers): self._send_client_reply(message, taskname) def _send_client_reply(self,", "list of messages. \"\"\" messages = [] if self.rank == 0: items =", "i in messages.message: message_list.append(proto_to_message(i)) except Exception as e: messages = None return message_list", "self.secure: with open(self.root_cert, \"rb\") as f: trusted_certs = f.read() with open(self.private_key, \"rb\") as", "not use this file except in compliance with the License. 
# You may", "taskname): try: message_list = [] with self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel) client", "None return message_list def send_result(self, message: Message): \"\"\"Send the processor results to server.", "= private_key self.secure = secure self.servers = server_args self.multi_gpu = is_multi_gpu self.rank =", "as channel: stub = admin_service.AdminCommunicatingStub(channel) reply = admin_msg.Reply() reply.client_name = self.client_name reply.message.CopyFrom(message_to_proto(message)) stub.SendResult(reply)", "f.read() call_credentials = grpc.metadata_call_credentials( lambda context, callback: callback(((\"x-custom-token\", self.client_name),), None) ) credentials =", "message \"\"\" if self.rank == 0: for taskname in tuple(self.servers): try: with self._set_up_channel(self.servers[taskname])", "License, Version 2.0 (the \"License\"); # you may not use this file except", "except BaseException: pass def _set_up_channel(self, channel_dict): \"\"\"Connect client to the server. Args: channel_dict:", "== 0: for taskname in tuple(self.servers): try: with self._set_up_channel(self.servers[taskname]) as channel: stub =", "name root_cert: root certificate ssl_cert: SSL certificate private_key: private key server_args: server args", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "Exception as e: messages = None return message_list def send_result(self, message: Message): \"\"\"Send", "# you may not use this file except in compliance with the License.", "as channel: stub = admin_service.AdminCommunicatingStub(channel) client = admin_msg.Client() client.client_name = self.client_name messages =", "the admin server.\"\"\" import threading from multiprocessing.dummy import Pool as ThreadPool import grpc", "Message): \"\"\"Call to send the request message. Args: message: request message \"\"\" if", "of messages. 
\"\"\" messages = [] if self.rank == 0: items = self.pool.map(self._retrieve_client_requests,", "agreed to in writing, software # distributed under the License is distributed on", "= message_to_proto(message) stub.SendReply(reply) except BaseException: pass def retrieve_requests(self) -> [Message]: \"\"\"Send the message", "self.ssl_cert = ssl_cert self.private_key = private_key self.secure = secure self.servers = server_args self.multi_gpu", "pass def retrieve_requests(self) -> [Message]: \"\"\"Send the message to retrieve pending requests from", "): \"\"\"To init the AdminMessageSender. Args: client_name: client name root_cert: root certificate ssl_cert:", "self.client_name),), None) ) credentials = grpc.ssl_channel_credentials( certificate_chain=certificate_chain, private_key=private_key, root_certificates=trusted_certs ) composite_credentials = grpc.composite_channel_credentials(credentials,", "ThreadPool import grpc import nvflare.private.fed.protos.admin_pb2 as admin_msg import nvflare.private.fed.protos.admin_pb2_grpc as admin_service from nvflare.private.admin_defs", "(the \"License\"); # you may not use this file except in compliance with", "import nvflare.private.fed.protos.admin_pb2 as admin_msg import nvflare.private.fed.protos.admin_pb2_grpc as admin_service from nvflare.private.admin_defs import Message from", "client = admin_msg.Client() client.client_name = self.client_name messages = stub.Retrieve(client) for i in messages.message:", "\"\"\"To init the AdminMessageSender. 
Args: client_name: client name root_cert: root certificate ssl_cert: SSL", "for i in messages.message: message_list.append(proto_to_message(i)) except Exception as e: messages = None return", "callback(((\"x-custom-token\", self.client_name),), None) ) credentials = grpc.ssl_channel_credentials( certificate_chain=certificate_chain, private_key=private_key, root_certificates=trusted_certs ) composite_credentials =", "# Unless required by applicable law or agreed to in writing, software #", "nvflare.private.fed.protos.admin_pb2 as admin_msg import nvflare.private.fed.protos.admin_pb2_grpc as admin_service from nvflare.private.admin_defs import Message from nvflare.private.fed.utils.messageproto", "by applicable law or agreed to in writing, software # distributed under the", "nvflare.private.admin_defs import Message from nvflare.private.fed.utils.messageproto import message_to_proto, proto_to_message from .admin import Sender lock", "in items: messages.extend(item) return messages def _retrieve_client_requests(self, taskname): try: message_list = [] with", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "certificate ssl_cert: SSL certificate private_key: private key server_args: server args secure: True/False is_multi_gpu:", "return messages def _retrieve_client_requests(self, taskname): try: message_list = [] with self._set_up_channel(self.servers[taskname]) as channel:", "tuple(self.servers): try: with self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel) reply = admin_msg.Reply() reply.client_name", "self._send_client_reply(message, taskname) def _send_client_reply(self, message, taskname): try: with self._set_up_channel(self.servers[taskname]) as channel: stub =", "0: # self.send_client_reply(message) for taskname in tuple(self.servers): self._send_client_reply(message, taskname) def _send_client_reply(self, message, taskname):", "e: messages = None return 
message_list def send_result(self, message: Message): \"\"\"Send the processor", "self.private_key = private_key self.secure = secure self.servers = server_args self.multi_gpu = is_multi_gpu self.rank", "as e: messages = None return message_list def send_result(self, message: Message): \"\"\"Send the", "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under", "args secure: True/False is_multi_gpu: True/False rank: local process rank \"\"\" self.client_name = client_name", "file except in compliance with the License. # You may obtain a copy", "message_list.append(proto_to_message(i)) except Exception as e: messages = None return message_list def send_result(self, message:", "reply.client_name = self.client_name reply.message.CopyFrom(message_to_proto(message)) # reply.message = message_to_proto(message) stub.SendReply(reply) except BaseException: pass def", "from nvflare.private.admin_defs import Message from nvflare.private.fed.utils.messageproto import message_to_proto, proto_to_message from .admin import Sender", "License for the specific language governing permissions and # limitations under the License.", "admin_service from nvflare.private.admin_defs import Message from nvflare.private.fed.utils.messageproto import message_to_proto, proto_to_message from .admin import", "item in items: messages.extend(item) return messages def _retrieve_client_requests(self, taskname): try: message_list = []", "to in writing, software # distributed under the License is distributed on an", "implied. # See the License for the specific language governing permissions and #", "messages.message: message_list.append(proto_to_message(i)) except Exception as e: messages = None return message_list def send_result(self,", "\"License\"); # you may not use this file except in compliance with the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "is_multi_gpu=False, rank=0, ): \"\"\"To init the AdminMessageSender. 
Args: client_name: client name root_cert: root", "BaseException: pass def _set_up_channel(self, channel_dict): \"\"\"Connect client to the server. Args: channel_dict: grpc", "FLAdmin Client to send the request message to the admin server.\"\"\" import threading", ") composite_credentials = grpc.composite_channel_credentials(credentials, call_credentials) channel = grpc.secure_channel(**channel_dict, credentials=composite_credentials) else: channel = grpc.insecure_channel(**channel_dict)", "import threading from multiprocessing.dummy import Pool as ThreadPool import grpc import nvflare.private.fed.protos.admin_pb2 as", "in tuple(self.servers): try: with self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel) reply = admin_msg.Reply()", "results to server. Args: message: message \"\"\" if self.rank == 0: for taskname", "admin_service.AdminCommunicatingStub(channel) reply = admin_msg.Reply() reply.client_name = self.client_name reply.message.CopyFrom(message_to_proto(message)) stub.SendResult(reply) except BaseException: pass def", "# reply.message = message_to_proto(message) stub.SendReply(reply) except BaseException: pass def retrieve_requests(self) -> [Message]: \"\"\"Send", "message, taskname): try: with self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel) reply = admin_msg.Reply()", "or implied. # See the License for the specific language governing permissions and", "= f.read() with open(self.ssl_cert, \"rb\") as f: certificate_chain = f.read() call_credentials = grpc.metadata_call_credentials(", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "an initialised grpc channel \"\"\" if self.secure: with open(self.root_cert, \"rb\") as f: trusted_certs", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "to retrieve pending requests from the Server. Returns: list of messages. \"\"\" messages", "open(self.ssl_cert, \"rb\") as f: certificate_chain = f.read() call_credentials = grpc.metadata_call_credentials( lambda context, callback:", "Args: client_name: client name root_cert: root certificate ssl_cert: SSL certificate private_key: private key", "secure: True/False is_multi_gpu: True/False rank: local process rank \"\"\" self.client_name = client_name self.root_cert", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "to the admin server.\"\"\" def __init__( self, client_name, root_cert=None, ssl_cert=None, private_key=None, server_args=None, secure=False,", "def _retrieve_client_requests(self, taskname): try: message_list = [] with self._set_up_channel(self.servers[taskname]) as channel: stub =", "send the request message to the admin server.\"\"\" import threading from multiprocessing.dummy import", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "stub = admin_service.AdminCommunicatingStub(channel) client = admin_msg.Client() client.client_name = self.client_name messages = stub.Retrieve(client) for", "the server. 
Args: channel_dict: grpc channel parameters Returns: an initialised grpc channel \"\"\"", "reply.message.CopyFrom(message_to_proto(message)) stub.SendResult(reply) except BaseException: pass def _set_up_channel(self, channel_dict): \"\"\"Connect client to the server.", "server.\"\"\" def __init__( self, client_name, root_cert=None, ssl_cert=None, private_key=None, server_args=None, secure=False, is_multi_gpu=False, rank=0, ):", "except BaseException: pass def retrieve_requests(self) -> [Message]: \"\"\"Send the message to retrieve pending", "send_reply(self, message: Message): \"\"\"Call to send the request message. Args: message: request message", "import nvflare.private.fed.protos.admin_pb2_grpc as admin_service from nvflare.private.admin_defs import Message from nvflare.private.fed.utils.messageproto import message_to_proto, proto_to_message", "Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the", "open(self.private_key, \"rb\") as f: private_key = f.read() with open(self.ssl_cert, \"rb\") as f: certificate_chain", "client to the server. Args: channel_dict: grpc channel parameters Returns: an initialised grpc", "Args: message: message \"\"\" if self.rank == 0: for taskname in tuple(self.servers): try:", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "if self.secure: with open(self.root_cert, \"rb\") as f: trusted_certs = f.read() with open(self.private_key, \"rb\")", "you may not use this file except in compliance with the License. 
#", "try: with self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel) reply = admin_msg.Reply() reply.client_name =", "self.rank = rank self.pool = ThreadPool(len(self.servers)) def send_reply(self, message: Message): \"\"\"Call to send", "except Exception as e: messages = None return message_list def send_result(self, message: Message):", "AdminMessageSender(Sender): \"\"\"AdminMessageSender to send the request message to the admin server.\"\"\" def __init__(", "to server. Args: message: message \"\"\" if self.rank == 0: for taskname in", "from .admin import Sender lock = threading.Lock() class AdminMessageSender(Sender): \"\"\"AdminMessageSender to send the", "in tuple(self.servers): self._send_client_reply(message, taskname) def _send_client_reply(self, message, taskname): try: with self._set_up_channel(self.servers[taskname]) as channel:", "use this file except in compliance with the License. # You may obtain", "\"\"\" if self.rank == 0: for taskname in tuple(self.servers): try: with self._set_up_channel(self.servers[taskname]) as", "tuple(self.servers): self._send_client_reply(message, taskname) def _send_client_reply(self, message, taskname): try: with self._set_up_channel(self.servers[taskname]) as channel: stub", "as ThreadPool import grpc import nvflare.private.fed.protos.admin_pb2 as admin_msg import nvflare.private.fed.protos.admin_pb2_grpc as admin_service from", "message to the admin server.\"\"\" import threading from multiprocessing.dummy import Pool as ThreadPool", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "with open(self.ssl_cert, \"rb\") as f: certificate_chain = f.read() call_credentials = grpc.metadata_call_credentials( lambda context,", "\"\"\"Connect client to the server. 
Args: channel_dict: grpc channel parameters Returns: an initialised", "2.0 (the \"License\"); # you may not use this file except in compliance", ".admin import Sender lock = threading.Lock() class AdminMessageSender(Sender): \"\"\"AdminMessageSender to send the request", "to send the request message to the admin server.\"\"\" def __init__( self, client_name,", "with self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel) client = admin_msg.Client() client.client_name = self.client_name", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "channel_dict): \"\"\"Connect client to the server. Args: channel_dict: grpc channel parameters Returns: an", "server.\"\"\" import threading from multiprocessing.dummy import Pool as ThreadPool import grpc import nvflare.private.fed.protos.admin_pb2", "# # Unless required by applicable law or agreed to in writing, software", "self.client_name reply.message.CopyFrom(message_to_proto(message)) # reply.message = message_to_proto(message) stub.SendReply(reply) except BaseException: pass def retrieve_requests(self) ->", "express or implied. # See the License for the specific language governing permissions", "\"\"\"Send the message to retrieve pending requests from the Server. Returns: list of", "client_name, root_cert=None, ssl_cert=None, private_key=None, server_args=None, secure=False, is_multi_gpu=False, rank=0, ): \"\"\"To init the AdminMessageSender.", "message: Message): \"\"\"Send the processor results to server. Args: message: message \"\"\" if", "message_to_proto(message) stub.SendReply(reply) except BaseException: pass def retrieve_requests(self) -> [Message]: \"\"\"Send the message to", "governing permissions and # limitations under the License. \"\"\"This is the FLAdmin Client", "if self.rank == 0: # self.send_client_reply(message) for taskname in tuple(self.servers): self._send_client_reply(message, taskname) def", "message. 
Args: message: request message \"\"\" if self.rank == 0: # self.send_client_reply(message) for", "either express or implied. # See the License for the specific language governing", "request message to the admin server.\"\"\" import threading from multiprocessing.dummy import Pool as", "messages = [] if self.rank == 0: items = self.pool.map(self._retrieve_client_requests, tuple(self.servers)) for item", "parameters Returns: an initialised grpc channel \"\"\" if self.secure: with open(self.root_cert, \"rb\") as", "the specific language governing permissions and # limitations under the License. \"\"\"This is", "= f.read() with open(self.private_key, \"rb\") as f: private_key = f.read() with open(self.ssl_cert, \"rb\")", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "\"\"\"This is the FLAdmin Client to send the request message to the admin", "if self.rank == 0: items = self.pool.map(self._retrieve_client_requests, tuple(self.servers)) for item in items: messages.extend(item)", "self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel) reply = admin_msg.Reply() reply.client_name = self.client_name reply.message.CopyFrom(message_to_proto(message))", "channel: stub = admin_service.AdminCommunicatingStub(channel) reply = admin_msg.Reply() reply.client_name = self.client_name reply.message.CopyFrom(message_to_proto(message)) # reply.message", "stub = admin_service.AdminCommunicatingStub(channel) reply = admin_msg.Reply() reply.client_name = self.client_name reply.message.CopyFrom(message_to_proto(message)) # reply.message =", "reply.message = message_to_proto(message) stub.SendReply(reply) except BaseException: pass def retrieve_requests(self) -> [Message]: \"\"\"Send the", "admin server.\"\"\" def __init__( self, client_name, root_cert=None, ssl_cert=None, private_key=None, server_args=None, secure=False, 
is_multi_gpu=False, rank=0,", "server_args self.multi_gpu = is_multi_gpu self.rank = rank self.pool = ThreadPool(len(self.servers)) def send_reply(self, message:", "the License. # You may obtain a copy of the License at #", "ssl_cert self.private_key = private_key self.secure = secure self.servers = server_args self.multi_gpu = is_multi_gpu", "lambda context, callback: callback(((\"x-custom-token\", self.client_name),), None) ) credentials = grpc.ssl_channel_credentials( certificate_chain=certificate_chain, private_key=private_key, root_certificates=trusted_certs", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "server args secure: True/False is_multi_gpu: True/False rank: local process rank \"\"\" self.client_name =", "proto_to_message from .admin import Sender lock = threading.Lock() class AdminMessageSender(Sender): \"\"\"AdminMessageSender to send", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "with open(self.private_key, \"rb\") as f: private_key = f.read() with open(self.ssl_cert, \"rb\") as f:", "True/False is_multi_gpu: True/False rank: local process rank \"\"\" self.client_name = client_name self.root_cert =", "self.servers = server_args self.multi_gpu = is_multi_gpu self.rank = rank self.pool = ThreadPool(len(self.servers)) def", "private_key=None, server_args=None, secure=False, is_multi_gpu=False, rank=0, ): \"\"\"To init the AdminMessageSender. Args: client_name: client", "AdminMessageSender. Args: client_name: client name root_cert: root certificate ssl_cert: SSL certificate private_key: private", "License. 
\"\"\"This is the FLAdmin Client to send the request message to the", "tuple(self.servers)) for item in items: messages.extend(item) return messages def _retrieve_client_requests(self, taskname): try: message_list", "the FLAdmin Client to send the request message to the admin server.\"\"\" import", "for item in items: messages.extend(item) return messages def _retrieve_client_requests(self, taskname): try: message_list =", "\"\"\" self.client_name = client_name self.root_cert = root_cert self.ssl_cert = ssl_cert self.private_key = private_key", "client_name: client name root_cert: root certificate ssl_cert: SSL certificate private_key: private key server_args:", "stub.SendReply(reply) except BaseException: pass def retrieve_requests(self) -> [Message]: \"\"\"Send the message to retrieve", "def send_reply(self, message: Message): \"\"\"Call to send the request message. Args: message: request", "0: for taskname in tuple(self.servers): try: with self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel)", "= admin_service.AdminCommunicatingStub(channel) reply = admin_msg.Reply() reply.client_name = self.client_name reply.message.CopyFrom(message_to_proto(message)) stub.SendResult(reply) except BaseException: pass", "request message. Args: message: request message \"\"\" if self.rank == 0: # self.send_client_reply(message)", "[] if self.rank == 0: items = self.pool.map(self._retrieve_client_requests, tuple(self.servers)) for item in items:", "grpc.metadata_call_credentials( lambda context, callback: callback(((\"x-custom-token\", self.client_name),), None) ) credentials = grpc.ssl_channel_credentials( certificate_chain=certificate_chain, private_key=private_key,", "rank \"\"\" self.client_name = client_name self.root_cert = root_cert self.ssl_cert = ssl_cert self.private_key =", "with the License. 
# You may obtain a copy of the License at", "secure self.servers = server_args self.multi_gpu = is_multi_gpu self.rank = rank self.pool = ThreadPool(len(self.servers))", "Server. Returns: list of messages. \"\"\" messages = [] if self.rank == 0:", "Pool as ThreadPool import grpc import nvflare.private.fed.protos.admin_pb2 as admin_msg import nvflare.private.fed.protos.admin_pb2_grpc as admin_service", "grpc import nvflare.private.fed.protos.admin_pb2 as admin_msg import nvflare.private.fed.protos.admin_pb2_grpc as admin_service from nvflare.private.admin_defs import Message", "message_list = [] with self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel) client = admin_msg.Client()", "is the FLAdmin Client to send the request message to the admin server.\"\"\"", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "to send the request message to the admin server.\"\"\" import threading from multiprocessing.dummy", "Args: message: request message \"\"\" if self.rank == 0: # self.send_client_reply(message) for taskname", "rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the \"License\");", "open(self.root_cert, \"rb\") as f: trusted_certs = f.read() with open(self.private_key, \"rb\") as f: private_key", "law or agreed to in writing, software # distributed under the License is", "grpc.ssl_channel_credentials( certificate_chain=certificate_chain, private_key=private_key, root_certificates=trusted_certs ) composite_credentials = grpc.composite_channel_credentials(credentials, call_credentials) channel = grpc.secure_channel(**channel_dict, credentials=composite_credentials)", "the License for the specific language governing permissions and # limitations under the", "call_credentials = grpc.metadata_call_credentials( lambda context, callback: callback(((\"x-custom-token\", self.client_name),), None) ) credentials = grpc.ssl_channel_credentials(", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "the request message to the admin server.\"\"\" def __init__( self, client_name, root_cert=None, ssl_cert=None,", "init the AdminMessageSender. Args: client_name: client name root_cert: root certificate ssl_cert: SSL certificate", "key server_args: server args secure: True/False is_multi_gpu: True/False rank: local process rank \"\"\"", "admin_msg.Reply() reply.client_name = self.client_name reply.message.CopyFrom(message_to_proto(message)) # reply.message = message_to_proto(message) stub.SendReply(reply) except BaseException: pass", "reply.client_name = self.client_name reply.message.CopyFrom(message_to_proto(message)) stub.SendResult(reply) except BaseException: pass def _set_up_channel(self, channel_dict): \"\"\"Connect client", "admin_msg import nvflare.private.fed.protos.admin_pb2_grpc as admin_service from nvflare.private.admin_defs import Message from nvflare.private.fed.utils.messageproto import message_to_proto,", "from the Server. Returns: list of messages. 
\"\"\" messages = [] if self.rank", "\"\"\"AdminMessageSender to send the request message to the admin server.\"\"\" def __init__( self,", "from nvflare.private.fed.utils.messageproto import message_to_proto, proto_to_message from .admin import Sender lock = threading.Lock() class", "= self.pool.map(self._retrieve_client_requests, tuple(self.servers)) for item in items: messages.extend(item) return messages def _retrieve_client_requests(self, taskname):", "= admin_msg.Client() client.client_name = self.client_name messages = stub.Retrieve(client) for i in messages.message: message_list.append(proto_to_message(i))", "private_key=private_key, root_certificates=trusted_certs ) composite_credentials = grpc.composite_channel_credentials(credentials, call_credentials) channel = grpc.secure_channel(**channel_dict, credentials=composite_credentials) else: channel", "in compliance with the License. # You may obtain a copy of the", "as admin_msg import nvflare.private.fed.protos.admin_pb2_grpc as admin_service from nvflare.private.admin_defs import Message from nvflare.private.fed.utils.messageproto import", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "= client_name self.root_cert = root_cert self.ssl_cert = ssl_cert self.private_key = private_key self.secure =", "to send the request message. Args: message: request message \"\"\" if self.rank ==", "messages = None return message_list def send_result(self, message: Message): \"\"\"Send the processor results", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "import grpc import nvflare.private.fed.protos.admin_pb2 as admin_msg import nvflare.private.fed.protos.admin_pb2_grpc as admin_service from nvflare.private.admin_defs import", "pending requests from the Server. Returns: list of messages. 
\"\"\" messages = []", "See the License for the specific language governing permissions and # limitations under", "= admin_msg.Reply() reply.client_name = self.client_name reply.message.CopyFrom(message_to_proto(message)) stub.SendResult(reply) except BaseException: pass def _set_up_channel(self, channel_dict):", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "= root_cert self.ssl_cert = ssl_cert self.private_key = private_key self.secure = secure self.servers =", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "the License. \"\"\"This is the FLAdmin Client to send the request message to", "0: items = self.pool.map(self._retrieve_client_requests, tuple(self.servers)) for item in items: messages.extend(item) return messages def", "f: certificate_chain = f.read() call_credentials = grpc.metadata_call_credentials( lambda context, callback: callback(((\"x-custom-token\", self.client_name),), None)", "process rank \"\"\" self.client_name = client_name self.root_cert = root_cert self.ssl_cert = ssl_cert self.private_key", "= None return message_list def send_result(self, message: Message): \"\"\"Send the processor results to", "permissions and # limitations under the License. 
\"\"\"This is the FLAdmin Client to", "Returns: an initialised grpc channel \"\"\" if self.secure: with open(self.root_cert, \"rb\") as f:", "certificate_chain=certificate_chain, private_key=private_key, root_certificates=trusted_certs ) composite_credentials = grpc.composite_channel_credentials(credentials, call_credentials) channel = grpc.secure_channel(**channel_dict, credentials=composite_credentials) else:", "private_key self.secure = secure self.servers = server_args self.multi_gpu = is_multi_gpu self.rank = rank", "channel parameters Returns: an initialised grpc channel \"\"\" if self.secure: with open(self.root_cert, \"rb\")", "nvflare.private.fed.protos.admin_pb2_grpc as admin_service from nvflare.private.admin_defs import Message from nvflare.private.fed.utils.messageproto import message_to_proto, proto_to_message from", "context, callback: callback(((\"x-custom-token\", self.client_name),), None) ) credentials = grpc.ssl_channel_credentials( certificate_chain=certificate_chain, private_key=private_key, root_certificates=trusted_certs )", "private key server_args: server args secure: True/False is_multi_gpu: True/False rank: local process rank", "class AdminMessageSender(Sender): \"\"\"AdminMessageSender to send the request message to the admin server.\"\"\" def", "root_cert=None, ssl_cert=None, private_key=None, server_args=None, secure=False, is_multi_gpu=False, rank=0, ): \"\"\"To init the AdminMessageSender. 
Args:", "as channel: stub = admin_service.AdminCommunicatingStub(channel) reply = admin_msg.Reply() reply.client_name = self.client_name reply.message.CopyFrom(message_to_proto(message)) #", "callback: callback(((\"x-custom-token\", self.client_name),), None) ) credentials = grpc.ssl_channel_credentials( certificate_chain=certificate_chain, private_key=private_key, root_certificates=trusted_certs ) composite_credentials", "self.multi_gpu = is_multi_gpu self.rank = rank self.pool = ThreadPool(len(self.servers)) def send_reply(self, message: Message):", "= admin_service.AdminCommunicatingStub(channel) client = admin_msg.Client() client.client_name = self.client_name messages = stub.Retrieve(client) for i", "# limitations under the License. \"\"\"This is the FLAdmin Client to send the", "def _set_up_channel(self, channel_dict): \"\"\"Connect client to the server. Args: channel_dict: grpc channel parameters", "Version 2.0 (the \"License\"); # you may not use this file except in", "except in compliance with the License. # You may obtain a copy of", "message: Message): \"\"\"Call to send the request message. Args: message: request message \"\"\"", "taskname in tuple(self.servers): try: with self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel) reply =", "= [] with self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel) client = admin_msg.Client() client.client_name", "[Message]: \"\"\"Send the message to retrieve pending requests from the Server. 
Returns: list", "admin_msg.Reply() reply.client_name = self.client_name reply.message.CopyFrom(message_to_proto(message)) stub.SendResult(reply) except BaseException: pass def _set_up_channel(self, channel_dict): \"\"\"Connect", "ssl_cert: SSL certificate private_key: private key server_args: server args secure: True/False is_multi_gpu: True/False", "client name root_cert: root certificate ssl_cert: SSL certificate private_key: private key server_args: server", "channel_dict: grpc channel parameters Returns: an initialised grpc channel \"\"\" if self.secure: with", "language governing permissions and # limitations under the License. \"\"\"This is the FLAdmin", "request message \"\"\" if self.rank == 0: # self.send_client_reply(message) for taskname in tuple(self.servers):", "= f.read() call_credentials = grpc.metadata_call_credentials( lambda context, callback: callback(((\"x-custom-token\", self.client_name),), None) ) credentials", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Message from nvflare.private.fed.utils.messageproto import message_to_proto, proto_to_message from .admin import Sender lock = threading.Lock()", "message: message \"\"\" if self.rank == 0: for taskname in tuple(self.servers): try: with", "ssl_cert=None, private_key=None, server_args=None, secure=False, is_multi_gpu=False, rank=0, ): \"\"\"To init the AdminMessageSender. Args: client_name:", "requests from the Server. Returns: list of messages. \"\"\" messages = [] if", "_set_up_channel(self, channel_dict): \"\"\"Connect client to the server. 
Args: channel_dict: grpc channel parameters Returns:", "trusted_certs = f.read() with open(self.private_key, \"rb\") as f: private_key = f.read() with open(self.ssl_cert,", "items = self.pool.map(self._retrieve_client_requests, tuple(self.servers)) for item in items: messages.extend(item) return messages def _retrieve_client_requests(self,", "root_certificates=trusted_certs ) composite_credentials = grpc.composite_channel_credentials(credentials, call_credentials) channel = grpc.secure_channel(**channel_dict, credentials=composite_credentials) else: channel =", "\"\"\"Call to send the request message. Args: message: request message \"\"\" if self.rank", "import Sender lock = threading.Lock() class AdminMessageSender(Sender): \"\"\"AdminMessageSender to send the request message", "CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0", "= [] if self.rank == 0: items = self.pool.map(self._retrieve_client_requests, tuple(self.servers)) for item in", "with self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel) reply = admin_msg.Reply() reply.client_name = self.client_name", "= admin_msg.Reply() reply.client_name = self.client_name reply.message.CopyFrom(message_to_proto(message)) # reply.message = message_to_proto(message) stub.SendReply(reply) except BaseException:", "= grpc.ssl_channel_credentials( certificate_chain=certificate_chain, private_key=private_key, root_certificates=trusted_certs ) composite_credentials = grpc.composite_channel_credentials(credentials, call_credentials) channel = grpc.secure_channel(**channel_dict,", "reply = admin_msg.Reply() reply.client_name = self.client_name reply.message.CopyFrom(message_to_proto(message)) # reply.message = message_to_proto(message) stub.SendReply(reply) except", "message to the admin server.\"\"\" def __init__( self, client_name, root_cert=None, ssl_cert=None, private_key=None, server_args=None,", "-> [Message]: \"\"\"Send the 
message to retrieve pending requests from the Server. Returns:", "ThreadPool(len(self.servers)) def send_reply(self, message: Message): \"\"\"Call to send the request message. Args: message:", "Sender lock = threading.Lock() class AdminMessageSender(Sender): \"\"\"AdminMessageSender to send the request message to", "rank self.pool = ThreadPool(len(self.servers)) def send_reply(self, message: Message): \"\"\"Call to send the request", "def __init__( self, client_name, root_cert=None, ssl_cert=None, private_key=None, server_args=None, secure=False, is_multi_gpu=False, rank=0, ): \"\"\"To", "channel: stub = admin_service.AdminCommunicatingStub(channel) client = admin_msg.Client() client.client_name = self.client_name messages = stub.Retrieve(client)", "nvflare.private.fed.utils.messageproto import message_to_proto, proto_to_message from .admin import Sender lock = threading.Lock() class AdminMessageSender(Sender):", "self.secure = secure self.servers = server_args self.multi_gpu = is_multi_gpu self.rank = rank self.pool", "= secure self.servers = server_args self.multi_gpu = is_multi_gpu self.rank = rank self.pool =", "self._set_up_channel(self.servers[taskname]) as channel: stub = admin_service.AdminCommunicatingStub(channel) client = admin_msg.Client() client.client_name = self.client_name messages", "f: private_key = f.read() with open(self.ssl_cert, \"rb\") as f: certificate_chain = f.read() call_credentials", "rank: local process rank \"\"\" self.client_name = client_name self.root_cert = root_cert self.ssl_cert =", "send the request message. 
Args: message: request message \"\"\" if self.rank == 0:", "client_name self.root_cert = root_cert self.ssl_cert = ssl_cert self.private_key = private_key self.secure = secure", "= stub.Retrieve(client) for i in messages.message: message_list.append(proto_to_message(i)) except Exception as e: messages =", "reply.message.CopyFrom(message_to_proto(message)) # reply.message = message_to_proto(message) stub.SendReply(reply) except BaseException: pass def retrieve_requests(self) -> [Message]:", "= self.client_name messages = stub.Retrieve(client) for i in messages.message: message_list.append(proto_to_message(i)) except Exception as", "= rank self.pool = ThreadPool(len(self.servers)) def send_reply(self, message: Message): \"\"\"Call to send the", "= self.client_name reply.message.CopyFrom(message_to_proto(message)) stub.SendResult(reply) except BaseException: pass def _set_up_channel(self, channel_dict): \"\"\"Connect client to", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "in messages.message: message_list.append(proto_to_message(i)) except Exception as e: messages = None return message_list def", "is_multi_gpu self.rank = rank self.pool = ThreadPool(len(self.servers)) def send_reply(self, message: Message): \"\"\"Call to", "local process rank \"\"\" self.client_name = client_name self.root_cert = root_cert self.ssl_cert = ssl_cert", "and # limitations under the License. \"\"\"This is the FLAdmin Client to send", "Returns: list of messages. \"\"\" messages = [] if self.rank == 0: items", "request message to the admin server.\"\"\" def __init__( self, client_name, root_cert=None, ssl_cert=None, private_key=None,", "processor results to server. 
Args: message: message \"\"\" if self.rank == 0: for", "\"\"\" messages = [] if self.rank == 0: items = self.pool.map(self._retrieve_client_requests, tuple(self.servers)) for", "self.root_cert = root_cert self.ssl_cert = ssl_cert self.private_key = private_key self.secure = secure self.servers" ]
[ "code_name = 'chinook_models_nojoined.py' generate_code_file(code_name, engine, nojoined=True) import_code(code_name) def generate_code(engine, **kwargs): \"\"\" CodeGenerator.__init__(self, metadata,", "nobackrefs=False, flask=False, ignore_cols=None, noclasses=False, nocomments=False) \"\"\" metadata = MetaData() metadata.reflect(bind=engine) codegen = CodeGenerator(metadata,", "import_module def main(): connection_string = 'sqlite:///chinook.db' engine = create_engine(connection_string) code_name = 'chinook_models_nojoined.py' generate_code_file(code_name,", "nojoined=True) import_code(code_name) def generate_code(engine, **kwargs): \"\"\" CodeGenerator.__init__(self, metadata, noindexes=False, noconstraints=False, nojoined=False, noinflect=False, nobackrefs=False,", "noclasses=False, nocomments=False) \"\"\" metadata = MetaData() metadata.reflect(bind=engine) codegen = CodeGenerator(metadata, **kwargs) sio =", "from sqlacodegen.codegen import CodeGenerator from importlib import import_module def main(): connection_string = 'sqlite:///chinook.db'", "StringIO from sqlalchemy import create_engine from sqlalchemy import MetaData from sqlacodegen.codegen import CodeGenerator", "metadata = MetaData() metadata.reflect(bind=engine) codegen = CodeGenerator(metadata, **kwargs) sio = StringIO() codegen.render(sio) return", "sio.getvalue() def generate_file(file_name, text): with open(file_name, 'w') as text_file: text_file.write(text) def generate_code_file(file_name, engine,", "'w') as text_file: text_file.write(text) def generate_code_file(file_name, engine, **kwargs): generate_file(file_name, generate_code(engine, **kwargs)) def import_code(file_name):", "def generate_code(engine, **kwargs): \"\"\" CodeGenerator.__init__(self, metadata, noindexes=False, noconstraints=False, nojoined=False, noinflect=False, nobackrefs=False, flask=False, ignore_cols=None,", "text): with open(file_name, 'w') as text_file: text_file.write(text) def generate_code_file(file_name, engine, 
**kwargs): generate_file(file_name, generate_code(engine,", "**kwargs): \"\"\" CodeGenerator.__init__(self, metadata, noindexes=False, noconstraints=False, nojoined=False, noinflect=False, nobackrefs=False, flask=False, ignore_cols=None, noclasses=False, nocomments=False)", "CodeGenerator from importlib import import_module def main(): connection_string = 'sqlite:///chinook.db' engine = create_engine(connection_string)", "CodeGenerator.__init__(self, metadata, noindexes=False, noconstraints=False, nojoined=False, noinflect=False, nobackrefs=False, flask=False, ignore_cols=None, noclasses=False, nocomments=False) \"\"\" metadata", "def generate_code_file(file_name, engine, **kwargs): generate_file(file_name, generate_code(engine, **kwargs)) def import_code(file_name): import_module(file_name) if __name__ ==", "'chinook_models_nojoined.py' generate_code_file(code_name, engine, nojoined=True) import_code(code_name) def generate_code(engine, **kwargs): \"\"\" CodeGenerator.__init__(self, metadata, noindexes=False, noconstraints=False,", "generate_code_file(code_name, engine, nojoined=True) import_code(code_name) def generate_code(engine, **kwargs): \"\"\" CodeGenerator.__init__(self, metadata, noindexes=False, noconstraints=False, nojoined=False,", "nojoined=False, noinflect=False, nobackrefs=False, flask=False, ignore_cols=None, noclasses=False, nocomments=False) \"\"\" metadata = MetaData() metadata.reflect(bind=engine) codegen", "= MetaData() metadata.reflect(bind=engine) codegen = CodeGenerator(metadata, **kwargs) sio = StringIO() codegen.render(sio) return sio.getvalue()", "engine, **kwargs): generate_file(file_name, generate_code(engine, **kwargs)) def import_code(file_name): import_module(file_name) if __name__ == '__main__': main()", "sio = StringIO() codegen.render(sio) return sio.getvalue() def generate_file(file_name, text): with open(file_name, 'w') as", "import StringIO from sqlalchemy import create_engine from sqlalchemy import MetaData from 
sqlacodegen.codegen import", "text_file: text_file.write(text) def generate_code_file(file_name, engine, **kwargs): generate_file(file_name, generate_code(engine, **kwargs)) def import_code(file_name): import_module(file_name) if", "codegen.render(sio) return sio.getvalue() def generate_file(file_name, text): with open(file_name, 'w') as text_file: text_file.write(text) def", "from importlib import import_module def main(): connection_string = 'sqlite:///chinook.db' engine = create_engine(connection_string) code_name", "from sqlalchemy import MetaData from sqlacodegen.codegen import CodeGenerator from importlib import import_module def", "generate_file(file_name, text): with open(file_name, 'w') as text_file: text_file.write(text) def generate_code_file(file_name, engine, **kwargs): generate_file(file_name,", "noinflect=False, nobackrefs=False, flask=False, ignore_cols=None, noclasses=False, nocomments=False) \"\"\" metadata = MetaData() metadata.reflect(bind=engine) codegen =", "import_code(code_name) def generate_code(engine, **kwargs): \"\"\" CodeGenerator.__init__(self, metadata, noindexes=False, noconstraints=False, nojoined=False, noinflect=False, nobackrefs=False, flask=False,", "= 'chinook_models_nojoined.py' generate_code_file(code_name, engine, nojoined=True) import_code(code_name) def generate_code(engine, **kwargs): \"\"\" CodeGenerator.__init__(self, metadata, noindexes=False,", "nocomments=False) \"\"\" metadata = MetaData() metadata.reflect(bind=engine) codegen = CodeGenerator(metadata, **kwargs) sio = StringIO()", "CodeGenerator(metadata, **kwargs) sio = StringIO() codegen.render(sio) return sio.getvalue() def generate_file(file_name, text): with open(file_name,", "connection_string = 'sqlite:///chinook.db' engine = create_engine(connection_string) code_name = 'chinook_models_nojoined.py' generate_code_file(code_name, engine, nojoined=True) import_code(code_name)", "'sqlite:///chinook.db' engine = create_engine(connection_string) code_name = 
'chinook_models_nojoined.py' generate_code_file(code_name, engine, nojoined=True) import_code(code_name) def generate_code(engine,", "noindexes=False, noconstraints=False, nojoined=False, noinflect=False, nobackrefs=False, flask=False, ignore_cols=None, noclasses=False, nocomments=False) \"\"\" metadata = MetaData()", "generate_code(engine, **kwargs): \"\"\" CodeGenerator.__init__(self, metadata, noindexes=False, noconstraints=False, nojoined=False, noinflect=False, nobackrefs=False, flask=False, ignore_cols=None, noclasses=False,", "= CodeGenerator(metadata, **kwargs) sio = StringIO() codegen.render(sio) return sio.getvalue() def generate_file(file_name, text): with", "sqlacodegen.codegen import CodeGenerator from importlib import import_module def main(): connection_string = 'sqlite:///chinook.db' engine", "from io import StringIO from sqlalchemy import create_engine from sqlalchemy import MetaData from", "main(): connection_string = 'sqlite:///chinook.db' engine = create_engine(connection_string) code_name = 'chinook_models_nojoined.py' generate_code_file(code_name, engine, nojoined=True)", "StringIO() codegen.render(sio) return sio.getvalue() def generate_file(file_name, text): with open(file_name, 'w') as text_file: text_file.write(text)", "create_engine from sqlalchemy import MetaData from sqlacodegen.codegen import CodeGenerator from importlib import import_module", "engine = create_engine(connection_string) code_name = 'chinook_models_nojoined.py' generate_code_file(code_name, engine, nojoined=True) import_code(code_name) def generate_code(engine, **kwargs):", "engine, nojoined=True) import_code(code_name) def generate_code(engine, **kwargs): \"\"\" CodeGenerator.__init__(self, metadata, noindexes=False, noconstraints=False, nojoined=False, noinflect=False,", "ignore_cols=None, noclasses=False, nocomments=False) \"\"\" metadata = MetaData() metadata.reflect(bind=engine) codegen = CodeGenerator(metadata, **kwargs) sio", "create_engine(connection_string) 
code_name = 'chinook_models_nojoined.py' generate_code_file(code_name, engine, nojoined=True) import_code(code_name) def generate_code(engine, **kwargs): \"\"\" CodeGenerator.__init__(self,", "import import_module def main(): connection_string = 'sqlite:///chinook.db' engine = create_engine(connection_string) code_name = 'chinook_models_nojoined.py'", "MetaData from sqlacodegen.codegen import CodeGenerator from importlib import import_module def main(): connection_string =", "sqlalchemy import MetaData from sqlacodegen.codegen import CodeGenerator from importlib import import_module def main():", "open(file_name, 'w') as text_file: text_file.write(text) def generate_code_file(file_name, engine, **kwargs): generate_file(file_name, generate_code(engine, **kwargs)) def", "MetaData() metadata.reflect(bind=engine) codegen = CodeGenerator(metadata, **kwargs) sio = StringIO() codegen.render(sio) return sio.getvalue() def", "noconstraints=False, nojoined=False, noinflect=False, nobackrefs=False, flask=False, ignore_cols=None, noclasses=False, nocomments=False) \"\"\" metadata = MetaData() metadata.reflect(bind=engine)", "= create_engine(connection_string) code_name = 'chinook_models_nojoined.py' generate_code_file(code_name, engine, nojoined=True) import_code(code_name) def generate_code(engine, **kwargs): \"\"\"", "importlib import import_module def main(): connection_string = 'sqlite:///chinook.db' engine = create_engine(connection_string) code_name =", "return sio.getvalue() def generate_file(file_name, text): with open(file_name, 'w') as text_file: text_file.write(text) def generate_code_file(file_name,", "import create_engine from sqlalchemy import MetaData from sqlacodegen.codegen import CodeGenerator from importlib import", "from sqlalchemy import create_engine from sqlalchemy import MetaData from sqlacodegen.codegen import CodeGenerator from", "import MetaData from sqlacodegen.codegen import CodeGenerator from importlib import import_module def main(): 
connection_string", "import CodeGenerator from importlib import import_module def main(): connection_string = 'sqlite:///chinook.db' engine =", "<filename>src/pandalchemy/generate_code.py from io import StringIO from sqlalchemy import create_engine from sqlalchemy import MetaData", "= 'sqlite:///chinook.db' engine = create_engine(connection_string) code_name = 'chinook_models_nojoined.py' generate_code_file(code_name, engine, nojoined=True) import_code(code_name) def", "codegen = CodeGenerator(metadata, **kwargs) sio = StringIO() codegen.render(sio) return sio.getvalue() def generate_file(file_name, text):", "= StringIO() codegen.render(sio) return sio.getvalue() def generate_file(file_name, text): with open(file_name, 'w') as text_file:", "def generate_file(file_name, text): with open(file_name, 'w') as text_file: text_file.write(text) def generate_code_file(file_name, engine, **kwargs):", "flask=False, ignore_cols=None, noclasses=False, nocomments=False) \"\"\" metadata = MetaData() metadata.reflect(bind=engine) codegen = CodeGenerator(metadata, **kwargs)", "\"\"\" CodeGenerator.__init__(self, metadata, noindexes=False, noconstraints=False, nojoined=False, noinflect=False, nobackrefs=False, flask=False, ignore_cols=None, noclasses=False, nocomments=False) \"\"\"", "metadata, noindexes=False, noconstraints=False, nojoined=False, noinflect=False, nobackrefs=False, flask=False, ignore_cols=None, noclasses=False, nocomments=False) \"\"\" metadata =", "metadata.reflect(bind=engine) codegen = CodeGenerator(metadata, **kwargs) sio = StringIO() codegen.render(sio) return sio.getvalue() def generate_file(file_name,", "sqlalchemy import create_engine from sqlalchemy import MetaData from sqlacodegen.codegen import CodeGenerator from importlib", "\"\"\" metadata = MetaData() metadata.reflect(bind=engine) codegen = CodeGenerator(metadata, **kwargs) sio = StringIO() codegen.render(sio)", "def main(): connection_string = 'sqlite:///chinook.db' engine = 
create_engine(connection_string) code_name = 'chinook_models_nojoined.py' generate_code_file(code_name, engine,", "as text_file: text_file.write(text) def generate_code_file(file_name, engine, **kwargs): generate_file(file_name, generate_code(engine, **kwargs)) def import_code(file_name): import_module(file_name)", "with open(file_name, 'w') as text_file: text_file.write(text) def generate_code_file(file_name, engine, **kwargs): generate_file(file_name, generate_code(engine, **kwargs))", "text_file.write(text) def generate_code_file(file_name, engine, **kwargs): generate_file(file_name, generate_code(engine, **kwargs)) def import_code(file_name): import_module(file_name) if __name__", "io import StringIO from sqlalchemy import create_engine from sqlalchemy import MetaData from sqlacodegen.codegen", "generate_code_file(file_name, engine, **kwargs): generate_file(file_name, generate_code(engine, **kwargs)) def import_code(file_name): import_module(file_name) if __name__ == '__main__':", "**kwargs) sio = StringIO() codegen.render(sio) return sio.getvalue() def generate_file(file_name, text): with open(file_name, 'w')" ]
[ "args.model_name) # if we find an optimized model, then we will use that", "volume_mounts.extend([ \"--volume\", \"{}:{}\".format(output_dir, output_dir)]) docker_run_cmd = [\"docker\", \"run\"] # only use -it when", "KIND, either express or implied. # See the License for the specific language", "action=\"store_true\") return arg_parser.parse_known_args(args) def validate_args(self, args): \"\"\"validate the args\"\"\" # validate the shared", "Unless required by applicable law or agreed to in writing, software # distributed", "run one time without this flag # to get stuff installed # Add", "\"--env\", \"CHECKPOINT_DIRECTORY_VOL={}\".format(args.checkpoint), \"--env\", \"EXTERNAL_MODELS_SOURCE_DIRECTORY={}\".format(args.model_source_dir), \"--env\", \"INTELAI_MODELS={}\".format(intelai_models), \"--env\", \"BENCHMARK_SCRIPTS={}\".format(benchmark_scripts), \"--env\", \"SOCKET_ID={}\".format(args.socket_id), \"--env\", \"MODEL_NAME={}\".format(args.model_name), \"--env\",", "debugging, otherwise we might get TTY error if args.debug: docker_run_cmd.append(\"-it\") docker_run_cmd = docker_run_cmd", "= os.path.join(benchmark_scripts, os.pardir, \"models\") if args.model_name: # find the path to the model's", "running the benchmarking job. \"\"\" benchmark_scripts = os.path.dirname(os.path.realpath(__file__)) intelai_models = os.path.join(benchmark_scripts, os.pardir, \"models\")", "\\ else \"\" in_graph_filename = os.path.basename(args.input_graph) if \\ args.input_graph else \"\" env_vars =", "# limitations under the License. 
# # SPDX-License-Identifier: EPL-2.0 # from __future__ import", "\"\"\" benchmark_scripts = os.path.dirname(os.path.realpath(__file__)) intelai_models = os.path.join(benchmark_scripts, os.pardir, \"models\") if args.model_name: # find", "\"SOCKET_ID={}\".format(args.socket_id), \"--env\", \"MODEL_NAME={}\".format(args.model_name), \"--env\", \"MODE={}\".format(args.mode), \"--env\", \"PRECISION={}\".format(args.precision), \"--env\", \"VERBOSE={}\".format(args.verbose), \"--env\", \"BATCH_SIZE={}\".format(args.batch_size), \"--env\", \"WORKSPACE={}\".format(workspace),", "default we will install, user needs to set NOINSTALL=True # manually after they", "job based on the specified args \"\"\" def main(self): args, unknown = self.parse_args(sys.argv[1:])", "mode # since they need to run one time without this flag #", "_run_docker_cmd(self, docker_run_cmd): \"\"\"runs docker proc and exits on ctrl c\"\"\" p = subprocess.Popen(docker_run_cmd,", "mount log dir otherwise since default is workspace folder mount_output_dir = True output_dir", "match raise ValueError(\"Found multiple model locations for {} {} {}\" .format(args.framework, args.model_name, args.precision))", "the use case name from the path use_case = str(dir_list[framework_index - 1]) #", "\"--env\", \"OUTPUT_DIR={}\".format(output_dir)] # by default we will install, user needs to set NOINSTALL=True", "this file except in compliance with the License. 
# You may obtain a", "p = subprocess.Popen(docker_run_cmd, preexec_fn=os.setsid) try: p.communicate() except KeyboardInterrupt: os.killpg(os.getpgid(p.pid), signal.SIGKILL) if __name__ ==", "model directory optimized_model_dir = os.path.join( benchmark_scripts, os.pardir, \"models\", use_case, args.framework, args.model_name) # if", "args.model_name, args.precision)) elif len(matches) == 0: raise ValueError(\"No model was found for {}", "division from __future__ import print_function import glob import os import signal import subprocess", "coding: utf-8 -*- # # Copyright (c) 2018 Intel Corporation # # Licensed", "mount_output_dir = False output_dir = os.path.join(workspace, 'logs') if args.output_dir != \"/models/benchmarks/common/tensorflow/logs\": # we", "enable # benchmark_only as the default if not args.benchmark_only and not args.accuracy_only: args.benchmark_only", "find the last occurrence of framework in the list framework_index = len(dir_list) -", "framework in the list framework_index = len(dir_list) - 1 - dir_list[::-1].index( args.framework) #", "\"ACCURACY_ONLY={}\".format(args.accuracy_only), \"--env\", \"OUTPUT_RESULTS={}\".format(args.output_results), \"--env\", \"NOINSTALL=False\", \"--env\", \"OUTPUT_DIR={}\".format(output_dir)] # by default we will install,", "ANY KIND, either express or implied. 
# See the License for the specific", "\"\"\"Launches benchmarking job based on the specified args \"\"\" def main(self): args, unknown", "\"--env\", \"NOINSTALL=False\", \"--env\", \"OUTPUT_DIR={}\".format(output_dir)] # by default we will install, user needs to", "\"--env\", \"NUM_INTER_THREADS={}\".format(args.num_inter_threads), \"--env\", \"NUM_INTRA_THREADS={}\".format(args.num_intra_threads), \"--env\", \"DATASET_LOCATION=/dataset\", \"--env\", \"CHECKPOINT_DIRECTORY=/checkpoints\", \"--env\", \"BENCHMARK_ONLY={}\".format(args.benchmark_only), \"--env\", \"ACCURACY_ONLY={}\".format(args.accuracy_only), \"--env\",", "= docker_run_cmd + env_vars + volume_mounts + [ \"--privileged\", \"-u\", \"root:root\", \"-w\", workspace,", "# benchmark_only as the default if not args.benchmark_only and not args.accuracy_only: args.benchmark_only =", "search_path = os.path.join( benchmark_scripts, \"*\", args.framework, args.model_name, args.mode, args.precision) matches = glob.glob(search_path) if", "parents=[self._common_arg_parser], description=\"Parse args for benchmark interface\") arg_parser.add_argument( \"--docker-image\", help=\"Specify the docker image/tag to", "1 - dir_list[::-1].index( args.framework) # grab the use case name from the path", "args\"\"\" # validate the shared args first super(LaunchBenchmark, self).validate_args(args) # Check for spaces", "\"common\", args.framework) mount_output_dir = False output_dir = os.path.join(workspace, 'logs') if args.output_dir != \"/models/benchmarks/common/tensorflow/logs\":", "\"--env\", \"MODEL_NAME={}\".format(args.model_name), \"--env\", \"MODE={}\".format(args.mode), \"--env\", \"PRECISION={}\".format(args.precision), \"--env\", \"VERBOSE={}\".format(args.verbose), \"--env\", \"BATCH_SIZE={}\".format(args.batch_size), \"--env\", \"WORKSPACE={}\".format(workspace), \"--env\",", "\"{}:{}\".format(intelai_models, mount_intelai_models), \"--volume\", \"{}:/dataset\".format(args.data_location), \"--volume\", 
\"{}:/checkpoints\".format(args.checkpoint), \"--volume\", \"{}:/in_graph\".format(in_graph_dir)] if mount_output_dir: volume_mounts.extend([ \"--volume\", \"{}:{}\".format(output_dir,", "os.pardir, \"models\") if args.model_name: # find the path to the model's benchmarks folder", "output_dir = args.output_dir in_graph_dir = os.path.dirname(args.input_graph) if args.input_graph \\ else \"\" in_graph_filename =", "\"{}:{}\".format(benchmark_scripts, mount_benchmark), \"--volume\", \"{}:{}\".format(args.model_source_dir, mount_external_models_source), \"--volume\", \"{}:{}\".format(intelai_models, mount_intelai_models), \"--volume\", \"{}:/dataset\".format(args.data_location), \"--volume\", \"{}:/checkpoints\".format(args.checkpoint), \"--volume\",", "import base_benchmark_util class LaunchBenchmark(base_benchmark_util.BaseBenchmarkUtil): \"\"\"Launches benchmarking job based on the specified args \"\"\"", "ValueError(\"The specified framework is not supported: {}\". format(args.framework)) # if neither benchmark_only or", "\"https_proxy\", \"no_proxy\", ]: if not os.environ.get(environment_proxy_setting): continue env_vars.append(\"--env\") env_vars.append(\"{}={}\".format( environment_proxy_setting, os.environ.get(environment_proxy_setting) )) volume_mounts", "not have whitespace(s)\") # validate that we support this framework by checking folder", "\"\"\" Runs a docker container with the specified image and environment variables to", "ValueError(\"No model was found for {} {} {}\" .format(args.framework, args.model_name, args.precision)) # use", "the use case dir_list = matches[0].split(\"/\") # find the last occurrence of framework", "env vars for custom_arg in args.model_args: if \"=\" not in custom_arg: raise ValueError(\"Expected", "{}\". 
format(custom_arg)) env_vars.append(\"--env\") env_vars.append(\"{}\".format(custom_arg)) # Add proxy to env variables if any set", "run command:\\n{}\".format(docker_run_cmd)) self._run_docker_cmd(docker_run_cmd) def _run_docker_cmd(self, docker_run_cmd): \"\"\"runs docker proc and exits on ctrl", "they get into `--debug` mode # since they need to run one time", "\\ args.input_graph else \"\" env_vars = [\"--env\", \"DATASET_LOCATION_VOL={}\".format(args.data_location), \"--env\", \"CHECKPOINT_DIRECTORY_VOL={}\".format(args.checkpoint), \"--env\", \"EXTERNAL_MODELS_SOURCE_DIRECTORY={}\".format(args.model_source_dir), \"--env\",", "\"/bin/bash\"] if not args.debug: docker_run_cmd.append(\"start.sh\") if args.verbose: print(\"Docker run command:\\n{}\".format(docker_run_cmd)) self._run_docker_cmd(docker_run_cmd) def _run_docker_cmd(self,", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "last occurrence of framework in the list framework_index = len(dir_list) - 1 -", "\"--env\", \"USE_CASE={}\".format(use_case), \"--env\", \"FRAMEWORK={}\".format(args.framework), \"--env\", \"NUM_CORES={}\".format(args.num_cores), \"--env\", \"NUM_INTER_THREADS={}\".format(args.num_inter_threads), \"--env\", \"NUM_INTRA_THREADS={}\".format(args.num_intra_threads), \"--env\", \"DATASET_LOCATION=/dataset\", \"--env\",", "\"--env\", \"VERBOSE={}\".format(args.verbose), \"--env\", \"BATCH_SIZE={}\".format(args.batch_size), \"--env\", \"WORKSPACE={}\".format(workspace), \"--env\", \"IN_GRAPH=/in_graph/{}\".format(in_graph_filename), \"--env\", \"MOUNT_BENCHMARK={}\".format(mount_benchmark), \"--env\", \"MOUNT_EXTERNAL_MODELS_SOURCE={}\".format(mount_external_models_source), \"--env\",", "docker_run_cmd = [\"docker\", \"run\"] # only use -it when debugging, otherwise we might", "len(matches) == 0: raise ValueError(\"No model was found for {} {} {}\" .format(args.framework,", "\"--env\", \"WORKSPACE={}\".format(workspace), \"--env\", 
\"IN_GRAPH=/in_graph/{}\".format(in_graph_filename), \"--env\", \"MOUNT_BENCHMARK={}\".format(mount_benchmark), \"--env\", \"MOUNT_EXTERNAL_MODELS_SOURCE={}\".format(mount_external_models_source), \"--env\", \"MOUNT_INTELAI_MODELS_SOURCE={}\".format(mount_intelai_models), \"--env\", \"USE_CASE={}\".format(use_case), \"--env\",", "raise ValueError(\"No model was found for {} {} {}\" .format(args.framework, args.model_name, args.precision)) #", "only used with the launch script arg_parser = ArgumentParser( parents=[self._common_arg_parser], description=\"Parse args for", "arg_parser = ArgumentParser( parents=[self._common_arg_parser], description=\"Parse args for benchmark interface\") arg_parser.add_argument( \"--docker-image\", help=\"Specify the", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "string \" \"should not have whitespace(s)\") # validate that we support this framework", "neither benchmark_only or accuracy_only are specified, then enable # benchmark_only as the default", "\"--env\", \"OUTPUT_RESULTS={}\".format(args.output_results), \"--env\", \"NOINSTALL=False\", \"--env\", \"OUTPUT_DIR={}\".format(output_dir)] # by default we will install, user", "args in the format \" \"`name=value` but received: {}\". 
format(custom_arg)) env_vars.append(\"--env\") env_vars.append(\"{}\".format(custom_arg)) #", "to the model's benchmarks folder search_path = os.path.join( benchmark_scripts, \"*\", args.framework, args.model_name, args.mode,", "in docker image if ' ' in args.docker_image: raise ValueError(\"docker image string \"", "def main(self): args, unknown = self.parse_args(sys.argv[1:]) try: self.validate_args(args) except (IOError, ValueError) as e:", "\"FRAMEWORK={}\".format(args.framework), \"--env\", \"NUM_CORES={}\".format(args.num_cores), \"--env\", \"NUM_INTER_THREADS={}\".format(args.num_inter_threads), \"--env\", \"NUM_INTRA_THREADS={}\".format(args.num_intra_threads), \"--env\", \"DATASET_LOCATION=/dataset\", \"--env\", \"CHECKPOINT_DIRECTORY=/checkpoints\", \"--env\", \"BENCHMARK_ONLY={}\".format(args.benchmark_only),", "OF ANY KIND, either express or implied. # See the License for the", "args, unknown = self.parse_args(sys.argv[1:]) try: self.validate_args(args) except (IOError, ValueError) as e: print(\"\\nError: {}\".format(e))", "are only used with the launch script arg_parser = ArgumentParser( parents=[self._common_arg_parser], description=\"Parse args", "\"*\", args.framework, args.model_name, args.mode, args.precision) matches = glob.glob(search_path) if len(matches) > 1: #", "in custom_arg: raise ValueError(\"Expected model args in the format \" \"`name=value` but received:", "args.precision)) # use the benchmarks directory path to find the use case dir_list", "\"EXTERNAL_MODELS_SOURCE_DIRECTORY={}\".format(args.model_source_dir), \"--env\", \"INTELAI_MODELS={}\".format(intelai_models), \"--env\", \"BENCHMARK_SCRIPTS={}\".format(benchmark_scripts), \"--env\", \"SOCKET_ID={}\".format(args.socket_id), \"--env\", \"MODEL_NAME={}\".format(args.model_name), \"--env\", \"MODE={}\".format(args.mode), \"--env\", \"PRECISION={}\".format(args.precision),", "environment_proxy_setting in [ \"http_proxy\", \"ftp_proxy\", \"https_proxy\", \"no_proxy\", ]: if not 
os.environ.get(environment_proxy_setting): continue env_vars.append(\"--env\")", "in [ \"http_proxy\", \"ftp_proxy\", \"https_proxy\", \"no_proxy\", ]: if not os.environ.get(environment_proxy_setting): continue env_vars.append(\"--env\") env_vars.append(\"{}={}\".format(", "-*- # # Copyright (c) 2018 Intel Corporation # # Licensed under the", "required=True) arg_parser.add_argument( \"--debug\", help=\"Launches debug mode which doesn't execute \" \"start.sh\", action=\"store_true\") return", ".format(args.framework, args.model_name, args.precision)) elif len(matches) == 0: raise ValueError(\"No model was found for", "image/tag to use\", dest=\"docker_image\", default=None, required=True) arg_parser.add_argument( \"--debug\", help=\"Launches debug mode which doesn't", "need to run one time without this flag # to get stuff installed", "to run one time without this flag # to get stuff installed #", "docker image if ' ' in args.docker_image: raise ValueError(\"docker image string \" \"should", "\"WORKSPACE={}\".format(workspace), \"--env\", \"IN_GRAPH=/in_graph/{}\".format(in_graph_filename), \"--env\", \"MOUNT_BENCHMARK={}\".format(mount_benchmark), \"--env\", \"MOUNT_EXTERNAL_MODELS_SOURCE={}\".format(mount_external_models_source), \"--env\", \"MOUNT_INTELAI_MODELS_SOURCE={}\".format(mount_intelai_models), \"--env\", \"USE_CASE={}\".format(use_case), \"--env\", \"FRAMEWORK={}\".format(args.framework),", "Check for spaces in docker image if ' ' in args.docker_image: raise ValueError(\"docker", "= [\"--volume\", \"{}:{}\".format(benchmark_scripts, mount_benchmark), \"--volume\", \"{}:{}\".format(args.model_source_dir, mount_external_models_source), \"--volume\", \"{}:{}\".format(intelai_models, mount_intelai_models), \"--volume\", \"{}:/dataset\".format(args.data_location), \"--volume\",", "use the benchmarks directory path to find the use case dir_list = matches[0].split(\"/\")", "[ \"--privileged\", \"-u\", \"root:root\", \"-w\", workspace, args.docker_image, 
\"/bin/bash\"] if not args.debug: docker_run_cmd.append(\"start.sh\") if", "= os.path.dirname(os.path.realpath(__file__)) intelai_models = os.path.join(benchmark_scripts, os.pardir, \"models\") if args.model_name: # find the path", "description=\"Parse args for benchmark interface\") arg_parser.add_argument( \"--docker-image\", help=\"Specify the docker image/tag to use\",", "\"\"\"validate the args\"\"\" # validate the shared args first super(LaunchBenchmark, self).validate_args(args) # Check", "start running the benchmarking job. \"\"\" benchmark_scripts = os.path.dirname(os.path.realpath(__file__)) intelai_models = os.path.join(benchmark_scripts, os.pardir,", "directory optimized_model_dir = os.path.join( benchmark_scripts, os.pardir, \"models\", use_case, args.framework, args.model_name) # if we", "\"--env\", \"DATASET_LOCATION=/dataset\", \"--env\", \"CHECKPOINT_DIRECTORY=/checkpoints\", \"--env\", \"BENCHMARK_ONLY={}\".format(args.benchmark_only), \"--env\", \"ACCURACY_ONLY={}\".format(args.accuracy_only), \"--env\", \"OUTPUT_RESULTS={}\".format(args.output_results), \"--env\", \"NOINSTALL=False\", \"--env\",", "format \" \"`name=value` but received: {}\". 
format(custom_arg)) env_vars.append(\"--env\") env_vars.append(\"{}\".format(custom_arg)) # Add proxy to", "elif len(matches) == 0: raise ValueError(\"No model was found for {} {} {}\"", "used with the launch script arg_parser = ArgumentParser( parents=[self._common_arg_parser], description=\"Parse args for benchmark", "\"--env\", \"CHECKPOINT_DIRECTORY=/checkpoints\", \"--env\", \"BENCHMARK_ONLY={}\".format(args.benchmark_only), \"--env\", \"ACCURACY_ONLY={}\".format(args.accuracy_only), \"--env\", \"OUTPUT_RESULTS={}\".format(args.output_results), \"--env\", \"NOINSTALL=False\", \"--env\", \"OUTPUT_DIR={}\".format(output_dir)] #", "volume_mounts = [\"--volume\", \"{}:{}\".format(benchmark_scripts, mount_benchmark), \"--volume\", \"{}:{}\".format(args.model_source_dir, mount_external_models_source), \"--volume\", \"{}:{}\".format(intelai_models, mount_intelai_models), \"--volume\", \"{}:/dataset\".format(args.data_location),", "= str(dir_list[framework_index - 1]) # find the intelai_optimized model directory optimized_model_dir = os.path.join(", "mount_benchmark = \"/workspace/benchmarks\" mount_external_models_source = \"/workspace/models\" mount_intelai_models = \"/workspace/intelai_models\" workspace = os.path.join(mount_benchmark, \"common\",", "= \"/workspace/benchmarks\" mount_external_models_source = \"/workspace/models\" mount_intelai_models = \"/workspace/intelai_models\" workspace = os.path.join(mount_benchmark, \"common\", args.framework)", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "intelai_models = os.path.join(benchmark_scripts, os.pardir, \"models\") if args.model_name: # find the path to the", "print_function import glob import os import signal import subprocess import sys from argparse", "args.framework) # grab the use case name from the path use_case = str(dir_list[framework_index", "since they need to run one time without this flag # to get", "args.model_name, args.mode, args.precision) matches = 
glob.glob(search_path) if len(matches) > 1: # we should", "occurrence of framework in the list framework_index = len(dir_list) - 1 - dir_list[::-1].index(", "in the format \" \"`name=value` but received: {}\". format(custom_arg)) env_vars.append(\"--env\") env_vars.append(\"{}\".format(custom_arg)) # Add", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "argparse import ArgumentParser from common import base_benchmark_util class LaunchBenchmark(base_benchmark_util.BaseBenchmarkUtil): \"\"\"Launches benchmarking job based", "args.verbose: print(\"Docker run command:\\n{}\".format(docker_run_cmd)) self._run_docker_cmd(docker_run_cmd) def _run_docker_cmd(self, docker_run_cmd): \"\"\"runs docker proc and exits", "grab the use case name from the path use_case = str(dir_list[framework_index - 1])", "workspace folder mount_output_dir = True output_dir = args.output_dir in_graph_dir = os.path.dirname(args.input_graph) if args.input_graph", "image string \" \"should not have whitespace(s)\") # validate that we support this", "the model's benchmarks folder search_path = os.path.join( benchmark_scripts, \"*\", args.framework, args.model_name, args.mode, args.precision)", "custom_arg in args.model_args: if \"=\" not in custom_arg: raise ValueError(\"Expected model args in", "__future__ import absolute_import from __future__ import division from __future__ import print_function import glob", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "docker_run_cmd = docker_run_cmd + env_vars + volume_mounts + [ \"--privileged\", \"-u\", \"root:root\", \"-w\",", "args for benchmark interface\") arg_parser.add_argument( \"--docker-image\", help=\"Specify the docker image/tag to use\", dest=\"docker_image\",", "one time without this flag # to get stuff installed # Add custom", "path to find the use case dir_list = matches[0].split(\"/\") # find the last", "the License at # # 
http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "since default is workspace folder mount_output_dir = True output_dir = args.output_dir in_graph_dir =", "- dir_list[::-1].index( args.framework) # grab the use case name from the path use_case", "otherwise we might get TTY error if args.debug: docker_run_cmd.append(\"-it\") docker_run_cmd = docker_run_cmd +", "len(dir_list) - 1 - dir_list[::-1].index( args.framework) # grab the use case name from", "# validate that we support this framework by checking folder names benchmark_dir =", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "set on host for environment_proxy_setting in [ \"http_proxy\", \"ftp_proxy\", \"https_proxy\", \"no_proxy\", ]: if", "limitations under the License. # # SPDX-License-Identifier: EPL-2.0 # from __future__ import absolute_import", "= os.path.join(workspace, 'logs') if args.output_dir != \"/models/benchmarks/common/tensorflow/logs\": # we don't need to mount", "to start running the benchmarking job. 
\"\"\" benchmark_scripts = os.path.dirname(os.path.realpath(__file__)) intelai_models = os.path.join(benchmark_scripts,", "os.path.isdir(intelai_models): intelai_models = optimized_model_dir mount_benchmark = \"/workspace/benchmarks\" mount_external_models_source = \"/workspace/models\" mount_intelai_models = \"/workspace/intelai_models\"", "installed # Add custom model args as env vars for custom_arg in args.model_args:", "parse_args(self, args): super(LaunchBenchmark, self).define_args() # Additional args that are only used with the", "required by applicable law or agreed to in writing, software # distributed under", "\"\" in_graph_filename = os.path.basename(args.input_graph) if \\ args.input_graph else \"\" env_vars = [\"--env\", \"DATASET_LOCATION_VOL={}\".format(args.data_location),", "# grab the use case name from the path use_case = str(dir_list[framework_index -", "subprocess.Popen(docker_run_cmd, preexec_fn=os.setsid) try: p.communicate() except KeyboardInterrupt: os.killpg(os.getpgid(p.pid), signal.SIGKILL) if __name__ == \"__main__\": util", "applicable law or agreed to in writing, software # distributed under the License", "if we find an optimized model, then we will use that path if", "if not args.debug: docker_run_cmd.append(\"start.sh\") if args.verbose: print(\"Docker run command:\\n{}\".format(docker_run_cmd)) self._run_docker_cmd(docker_run_cmd) def _run_docker_cmd(self, docker_run_cmd):", "dest=\"docker_image\", default=None, required=True) arg_parser.add_argument( \"--debug\", help=\"Launches debug mode which doesn't execute \" \"start.sh\",", "\"--volume\", \"{}:/checkpoints\".format(args.checkpoint), \"--volume\", \"{}:/in_graph\".format(in_graph_dir)] if mount_output_dir: volume_mounts.extend([ \"--volume\", \"{}:{}\".format(output_dir, output_dir)]) docker_run_cmd = [\"docker\",", "use_case = str(dir_list[framework_index - 1]) # find the intelai_optimized model directory optimized_model_dir =", "License. 
# # SPDX-License-Identifier: EPL-2.0 # from __future__ import absolute_import from __future__ import", "try: self.validate_args(args) except (IOError, ValueError) as e: print(\"\\nError: {}\".format(e)) sys.exit(1) self.run_docker_container(args) def parse_args(self,", "arg_parser.parse_known_args(args) def validate_args(self, args): \"\"\"validate the args\"\"\" # validate the shared args first", "matches = glob.glob(search_path) if len(matches) > 1: # we should never get more", "\"--env\", \"IN_GRAPH=/in_graph/{}\".format(in_graph_filename), \"--env\", \"MOUNT_BENCHMARK={}\".format(mount_benchmark), \"--env\", \"MOUNT_EXTERNAL_MODELS_SOURCE={}\".format(mount_external_models_source), \"--env\", \"MOUNT_INTELAI_MODELS_SOURCE={}\".format(mount_intelai_models), \"--env\", \"USE_CASE={}\".format(use_case), \"--env\", \"FRAMEWORK={}\".format(args.framework), \"--env\",", "flag # to get stuff installed # Add custom model args as env", "args): super(LaunchBenchmark, self).define_args() # Additional args that are only used with the launch", "stuff installed # Add custom model args as env vars for custom_arg in", "or agreed to in writing, software # distributed under the License is distributed", "# Add proxy to env variables if any set on host for environment_proxy_setting", "mode which doesn't execute \" \"start.sh\", action=\"store_true\") return arg_parser.parse_known_args(args) def validate_args(self, args): \"\"\"validate", "benchmark_only as the default if not args.benchmark_only and not args.accuracy_only: args.benchmark_only = True", "model's benchmarks folder search_path = os.path.join( benchmark_scripts, \"*\", args.framework, args.model_name, args.mode, args.precision) matches", "sys.exit(1) self.run_docker_container(args) def parse_args(self, args): super(LaunchBenchmark, self).define_args() # Additional args that are only", "\"--env\", \"PRECISION={}\".format(args.precision), \"--env\", \"VERBOSE={}\".format(args.verbose), \"--env\", 
\"BATCH_SIZE={}\".format(args.batch_size), \"--env\", \"WORKSPACE={}\".format(workspace), \"--env\", \"IN_GRAPH=/in_graph/{}\".format(in_graph_filename), \"--env\", \"MOUNT_BENCHMARK={}\".format(mount_benchmark), \"--env\",", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "env_vars.append(\"{}={}\".format( environment_proxy_setting, os.environ.get(environment_proxy_setting) )) volume_mounts = [\"--volume\", \"{}:{}\".format(benchmark_scripts, mount_benchmark), \"--volume\", \"{}:{}\".format(args.model_source_dir, mount_external_models_source), \"--volume\",", "raise ValueError(\"docker image string \" \"should not have whitespace(s)\") # validate that we", "if \\ args.input_graph else \"\" env_vars = [\"--env\", \"DATASET_LOCATION_VOL={}\".format(args.data_location), \"--env\", \"CHECKPOINT_DIRECTORY_VOL={}\".format(args.checkpoint), \"--env\", \"EXTERNAL_MODELS_SOURCE_DIRECTORY={}\".format(args.model_source_dir),", "from common import base_benchmark_util class LaunchBenchmark(base_benchmark_util.BaseBenchmarkUtil): \"\"\"Launches benchmarking job based on the specified", "intelai_models = optimized_model_dir mount_benchmark = \"/workspace/benchmarks\" mount_external_models_source = \"/workspace/models\" mount_intelai_models = \"/workspace/intelai_models\" workspace", "more than one match raise ValueError(\"Found multiple model locations for {} {} {}\"", "benchmark interface\") arg_parser.add_argument( \"--docker-image\", help=\"Specify the docker image/tag to use\", dest=\"docker_image\", default=None, required=True)", "vars for custom_arg in args.model_args: if \"=\" not in custom_arg: raise ValueError(\"Expected model", "specified image and environment variables to start running the benchmarking job. 
\"\"\" benchmark_scripts", "ValueError) as e: print(\"\\nError: {}\".format(e)) sys.exit(1) self.run_docker_container(args) def parse_args(self, args): super(LaunchBenchmark, self).define_args() #", "validate that we support this framework by checking folder names benchmark_dir = os.path.dirname(os.path.realpath(__file__))", "in the list framework_index = len(dir_list) - 1 - dir_list[::-1].index( args.framework) # grab", "\"{}:/checkpoints\".format(args.checkpoint), \"--volume\", \"{}:/in_graph\".format(in_graph_dir)] if mount_output_dir: volume_mounts.extend([ \"--volume\", \"{}:{}\".format(output_dir, output_dir)]) docker_run_cmd = [\"docker\", \"run\"]", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "for {} {} {}\" .format(args.framework, args.model_name, args.precision)) elif len(matches) == 0: raise ValueError(\"No", "folder names benchmark_dir = os.path.dirname(os.path.realpath(__file__)) if glob.glob(\"{}/*/{}\".format(benchmark_dir, args.framework)) == []: raise ValueError(\"The specified", "writing, software # distributed under the License is distributed on an \"AS IS\"", "specified framework is not supported: {}\". 
format(args.framework)) # if neither benchmark_only or accuracy_only", "SPDX-License-Identifier: EPL-2.0 # from __future__ import absolute_import from __future__ import division from __future__", "args.accuracy_only: args.benchmark_only = True def run_docker_container(self, args): \"\"\" Runs a docker container with", "to find the use case dir_list = matches[0].split(\"/\") # find the last occurrence", "\"\" env_vars = [\"--env\", \"DATASET_LOCATION_VOL={}\".format(args.data_location), \"--env\", \"CHECKPOINT_DIRECTORY_VOL={}\".format(args.checkpoint), \"--env\", \"EXTERNAL_MODELS_SOURCE_DIRECTORY={}\".format(args.model_source_dir), \"--env\", \"INTELAI_MODELS={}\".format(intelai_models), \"--env\", \"BENCHMARK_SCRIPTS={}\".format(benchmark_scripts),", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "License. # You may obtain a copy of the License at # #", "args.model_name, args.precision)) # use the benchmarks directory path to find the use case", "e: print(\"\\nError: {}\".format(e)) sys.exit(1) self.run_docker_container(args) def parse_args(self, args): super(LaunchBenchmark, self).define_args() # Additional args", "= os.path.dirname(os.path.realpath(__file__)) if glob.glob(\"{}/*/{}\".format(benchmark_dir, args.framework)) == []: raise ValueError(\"The specified framework is not", "compliance with the License. # You may obtain a copy of the License", "common import base_benchmark_util class LaunchBenchmark(base_benchmark_util.BaseBenchmarkUtil): \"\"\"Launches benchmarking job based on the specified args", "\"`name=value` but received: {}\". 
format(custom_arg)) env_vars.append(\"--env\") env_vars.append(\"{}\".format(custom_arg)) # Add proxy to env variables", "self).validate_args(args) # Check for spaces in docker image if ' ' in args.docker_image:", "the specified args \"\"\" def main(self): args, unknown = self.parse_args(sys.argv[1:]) try: self.validate_args(args) except", "\"\"\"runs docker proc and exits on ctrl c\"\"\" p = subprocess.Popen(docker_run_cmd, preexec_fn=os.setsid) try:", "os.path.join( benchmark_scripts, \"*\", args.framework, args.model_name, args.mode, args.precision) matches = glob.glob(search_path) if len(matches) >", "+ volume_mounts + [ \"--privileged\", \"-u\", \"root:root\", \"-w\", workspace, args.docker_image, \"/bin/bash\"] if not", "received: {}\". format(custom_arg)) env_vars.append(\"--env\") env_vars.append(\"{}\".format(custom_arg)) # Add proxy to env variables if any", "mount_output_dir: volume_mounts.extend([ \"--volume\", \"{}:{}\".format(output_dir, output_dir)]) docker_run_cmd = [\"docker\", \"run\"] # only use -it", "the last occurrence of framework in the list framework_index = len(dir_list) - 1", "\"BATCH_SIZE={}\".format(args.batch_size), \"--env\", \"WORKSPACE={}\".format(workspace), \"--env\", \"IN_GRAPH=/in_graph/{}\".format(in_graph_filename), \"--env\", \"MOUNT_BENCHMARK={}\".format(mount_benchmark), \"--env\", \"MOUNT_EXTERNAL_MODELS_SOURCE={}\".format(mount_external_models_source), \"--env\", \"MOUNT_INTELAI_MODELS_SOURCE={}\".format(mount_intelai_models), \"--env\", \"USE_CASE={}\".format(use_case),", "the shared args first super(LaunchBenchmark, self).validate_args(args) # Check for spaces in docker image", "except (IOError, ValueError) as e: print(\"\\nError: {}\".format(e)) sys.exit(1) self.run_docker_container(args) def parse_args(self, args): super(LaunchBenchmark,", "\"/workspace/benchmarks\" mount_external_models_source = \"/workspace/models\" mount_intelai_models = \"/workspace/intelai_models\" workspace = os.path.join(mount_benchmark, 
\"common\", args.framework) mount_output_dir", "custom_arg: raise ValueError(\"Expected model args in the format \" \"`name=value` but received: {}\".", "never get more than one match raise ValueError(\"Found multiple model locations for {}", "then enable # benchmark_only as the default if not args.benchmark_only and not args.accuracy_only:", "os.path.join( benchmark_scripts, os.pardir, \"models\", use_case, args.framework, args.model_name) # if we find an optimized", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "import print_function import glob import os import signal import subprocess import sys from", "that we support this framework by checking folder names benchmark_dir = os.path.dirname(os.path.realpath(__file__)) if", "{} {}\" .format(args.framework, args.model_name, args.precision)) elif len(matches) == 0: raise ValueError(\"No model was", "benchmarking job based on the specified args \"\"\" def main(self): args, unknown =", "args.debug: docker_run_cmd.append(\"-it\") docker_run_cmd = docker_run_cmd + env_vars + volume_mounts + [ \"--privileged\", \"-u\",", "= False output_dir = os.path.join(workspace, 'logs') if args.output_dir != \"/models/benchmarks/common/tensorflow/logs\": # we don't", "\"--env\", \"MODE={}\".format(args.mode), \"--env\", \"PRECISION={}\".format(args.precision), \"--env\", \"VERBOSE={}\".format(args.verbose), \"--env\", \"BATCH_SIZE={}\".format(args.batch_size), \"--env\", \"WORKSPACE={}\".format(workspace), \"--env\", \"IN_GRAPH=/in_graph/{}\".format(in_graph_filename), \"--env\",", "optimized model, then we will use that path if os.path.isdir(intelai_models): intelai_models = optimized_model_dir", "otherwise since default is workspace folder mount_output_dir = True output_dir = args.output_dir in_graph_dir", "\"--env\", \"BENCHMARK_ONLY={}\".format(args.benchmark_only), \"--env\", \"ACCURACY_ONLY={}\".format(args.accuracy_only), \"--env\", 
\"OUTPUT_RESULTS={}\".format(args.output_results), \"--env\", \"NOINSTALL=False\", \"--env\", \"OUTPUT_DIR={}\".format(output_dir)] # by default", "as env vars for custom_arg in args.model_args: if \"=\" not in custom_arg: raise", "case dir_list = matches[0].split(\"/\") # find the last occurrence of framework in the", "arg_parser.add_argument( \"--docker-image\", help=\"Specify the docker image/tag to use\", dest=\"docker_image\", default=None, required=True) arg_parser.add_argument( \"--debug\",", "install, user needs to set NOINSTALL=True # manually after they get into `--debug`", "\"{}:{}\".format(args.model_source_dir, mount_external_models_source), \"--volume\", \"{}:{}\".format(intelai_models, mount_intelai_models), \"--volume\", \"{}:/dataset\".format(args.data_location), \"--volume\", \"{}:/checkpoints\".format(args.checkpoint), \"--volume\", \"{}:/in_graph\".format(in_graph_dir)] if mount_output_dir:", "= os.path.basename(args.input_graph) if \\ args.input_graph else \"\" env_vars = [\"--env\", \"DATASET_LOCATION_VOL={}\".format(args.data_location), \"--env\", \"CHECKPOINT_DIRECTORY_VOL={}\".format(args.checkpoint),", "os.path.dirname(os.path.realpath(__file__)) intelai_models = os.path.join(benchmark_scripts, os.pardir, \"models\") if args.model_name: # find the path to", "not use this file except in compliance with the License. # You may", "of framework in the list framework_index = len(dir_list) - 1 - dir_list[::-1].index( args.framework)", "on ctrl c\"\"\" p = subprocess.Popen(docker_run_cmd, preexec_fn=os.setsid) try: p.communicate() except KeyboardInterrupt: os.killpg(os.getpgid(p.pid), signal.SIGKILL)", "utf-8 -*- # # Copyright (c) 2018 Intel Corporation # # Licensed under", "{}\". 
format(args.framework)) # if neither benchmark_only or accuracy_only are specified, then enable #", "License, Version 2.0 (the \"License\"); # you may not use this file except", "exits on ctrl c\"\"\" p = subprocess.Popen(docker_run_cmd, preexec_fn=os.setsid) try: p.communicate() except KeyboardInterrupt: os.killpg(os.getpgid(p.pid),", "Corporation # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "\"{}:/in_graph\".format(in_graph_dir)] if mount_output_dir: volume_mounts.extend([ \"--volume\", \"{}:{}\".format(output_dir, output_dir)]) docker_run_cmd = [\"docker\", \"run\"] # only", "execute \" \"start.sh\", action=\"store_true\") return arg_parser.parse_known_args(args) def validate_args(self, args): \"\"\"validate the args\"\"\" #", "the benchmarks directory path to find the use case dir_list = matches[0].split(\"/\") #", "base_benchmark_util class LaunchBenchmark(base_benchmark_util.BaseBenchmarkUtil): \"\"\"Launches benchmarking job based on the specified args \"\"\" def", "support this framework by checking folder names benchmark_dir = os.path.dirname(os.path.realpath(__file__)) if glob.glob(\"{}/*/{}\".format(benchmark_dir, args.framework))", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "in args.docker_image: raise ValueError(\"docker image string \" \"should not have whitespace(s)\") # validate", "set NOINSTALL=True # manually after they get into `--debug` mode # since they", "signal import subprocess import sys from argparse import ArgumentParser from common import base_benchmark_util", "args.framework) mount_output_dir = False output_dir = os.path.join(workspace, 'logs') if args.output_dir != \"/models/benchmarks/common/tensorflow/logs\": #", "dir otherwise since default is workspace folder mount_output_dir = True output_dir = args.output_dir", "should never get more than one match raise ValueError(\"Found multiple model locations for", "(IOError, ValueError) as e: print(\"\\nError: {}\".format(e)) 
sys.exit(1) self.run_docker_container(args) def parse_args(self, args): super(LaunchBenchmark, self).define_args()", "\"--env\", \"MOUNT_BENCHMARK={}\".format(mount_benchmark), \"--env\", \"MOUNT_EXTERNAL_MODELS_SOURCE={}\".format(mount_external_models_source), \"--env\", \"MOUNT_INTELAI_MODELS_SOURCE={}\".format(mount_intelai_models), \"--env\", \"USE_CASE={}\".format(use_case), \"--env\", \"FRAMEWORK={}\".format(args.framework), \"--env\", \"NUM_CORES={}\".format(args.num_cores), \"--env\",", "# you may not use this file except in compliance with the License.", "glob.glob(\"{}/*/{}\".format(benchmark_dir, args.framework)) == []: raise ValueError(\"The specified framework is not supported: {}\". format(args.framework))", "== []: raise ValueError(\"The specified framework is not supported: {}\". format(args.framework)) # if", "docker_run_cmd): \"\"\"runs docker proc and exits on ctrl c\"\"\" p = subprocess.Popen(docker_run_cmd, preexec_fn=os.setsid)", "\"--volume\", \"{}:{}\".format(intelai_models, mount_intelai_models), \"--volume\", \"{}:/dataset\".format(args.data_location), \"--volume\", \"{}:/checkpoints\".format(args.checkpoint), \"--volume\", \"{}:/in_graph\".format(in_graph_dir)] if mount_output_dir: volume_mounts.extend([ \"--volume\",", "agreed to in writing, software # distributed under the License is distributed on", "= matches[0].split(\"/\") # find the last occurrence of framework in the list framework_index", "\" \"should not have whitespace(s)\") # validate that we support this framework by", "= self.parse_args(sys.argv[1:]) try: self.validate_args(args) except (IOError, ValueError) as e: print(\"\\nError: {}\".format(e)) sys.exit(1) self.run_docker_container(args)", "args.mode, args.precision) matches = glob.glob(search_path) if len(matches) > 1: # we should never", "language governing permissions and # limitations under the License. 
# # SPDX-License-Identifier: EPL-2.0", "directory path to find the use case dir_list = matches[0].split(\"/\") # find the", "help=\"Launches debug mode which doesn't execute \" \"start.sh\", action=\"store_true\") return arg_parser.parse_known_args(args) def validate_args(self,", "environment variables to start running the benchmarking job. \"\"\" benchmark_scripts = os.path.dirname(os.path.realpath(__file__)) intelai_models", "os.path.dirname(os.path.realpath(__file__)) if glob.glob(\"{}/*/{}\".format(benchmark_dir, args.framework)) == []: raise ValueError(\"The specified framework is not supported:", "find an optimized model, then we will use that path if os.path.isdir(intelai_models): intelai_models", "mount_output_dir = True output_dir = args.output_dir in_graph_dir = os.path.dirname(args.input_graph) if args.input_graph \\ else", "in_graph_dir = os.path.dirname(args.input_graph) if args.input_graph \\ else \"\" in_graph_filename = os.path.basename(args.input_graph) if \\", "(the \"License\"); # you may not use this file except in compliance with", "# find the last occurrence of framework in the list framework_index = len(dir_list)", "container with the specified image and environment variables to start running the benchmarking", "if len(matches) > 1: # we should never get more than one match", "if os.path.isdir(intelai_models): intelai_models = optimized_model_dir mount_benchmark = \"/workspace/benchmarks\" mount_external_models_source = \"/workspace/models\" mount_intelai_models =", "= [\"--env\", \"DATASET_LOCATION_VOL={}\".format(args.data_location), \"--env\", \"CHECKPOINT_DIRECTORY_VOL={}\".format(args.checkpoint), \"--env\", \"EXTERNAL_MODELS_SOURCE_DIRECTORY={}\".format(args.model_source_dir), \"--env\", \"INTELAI_MODELS={}\".format(intelai_models), \"--env\", \"BENCHMARK_SCRIPTS={}\".format(benchmark_scripts), \"--env\", \"SOCKET_ID={}\".format(args.socket_id),", "framework is not supported: {}\". 
format(args.framework)) # if neither benchmark_only or accuracy_only are", "specified, then enable # benchmark_only as the default if not args.benchmark_only and not", "if \"=\" not in custom_arg: raise ValueError(\"Expected model args in the format \"", "False output_dir = os.path.join(workspace, 'logs') if args.output_dir != \"/models/benchmarks/common/tensorflow/logs\": # we don't need", "optimized_model_dir = os.path.join( benchmark_scripts, os.pardir, \"models\", use_case, args.framework, args.model_name) # if we find", "the default if not args.benchmark_only and not args.accuracy_only: args.benchmark_only = True def run_docker_container(self,", "def validate_args(self, args): \"\"\"validate the args\"\"\" # validate the shared args first super(LaunchBenchmark,", "# Add custom model args as env vars for custom_arg in args.model_args: if", "# Unless required by applicable law or agreed to in writing, software #", "\" \"`name=value` but received: {}\". format(custom_arg)) env_vars.append(\"--env\") env_vars.append(\"{}\".format(custom_arg)) # Add proxy to env", "they need to run one time without this flag # to get stuff", "model was found for {} {} {}\" .format(args.framework, args.model_name, args.precision)) # use the", "by applicable law or agreed to in writing, software # distributed under the", "in_graph_filename = os.path.basename(args.input_graph) if \\ args.input_graph else \"\" env_vars = [\"--env\", \"DATASET_LOCATION_VOL={}\".format(args.data_location), \"--env\",", ")) volume_mounts = [\"--volume\", \"{}:{}\".format(benchmark_scripts, mount_benchmark), \"--volume\", \"{}:{}\".format(args.model_source_dir, mount_external_models_source), \"--volume\", \"{}:{}\".format(intelai_models, mount_intelai_models), \"--volume\",", "use case name from the path use_case = str(dir_list[framework_index - 1]) # find", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "name from the path use_case = 
str(dir_list[framework_index - 1]) # find the intelai_optimized", "permissions and # limitations under the License. # # SPDX-License-Identifier: EPL-2.0 # from", "get TTY error if args.debug: docker_run_cmd.append(\"-it\") docker_run_cmd = docker_run_cmd + env_vars + volume_mounts", "{}\".format(e)) sys.exit(1) self.run_docker_container(args) def parse_args(self, args): super(LaunchBenchmark, self).define_args() # Additional args that are", "= \"/workspace/models\" mount_intelai_models = \"/workspace/intelai_models\" workspace = os.path.join(mount_benchmark, \"common\", args.framework) mount_output_dir = False", "os.path.basename(args.input_graph) if \\ args.input_graph else \"\" env_vars = [\"--env\", \"DATASET_LOCATION_VOL={}\".format(args.data_location), \"--env\", \"CHECKPOINT_DIRECTORY_VOL={}\".format(args.checkpoint), \"--env\",", "\"IN_GRAPH=/in_graph/{}\".format(in_graph_filename), \"--env\", \"MOUNT_BENCHMARK={}\".format(mount_benchmark), \"--env\", \"MOUNT_EXTERNAL_MODELS_SOURCE={}\".format(mount_external_models_source), \"--env\", \"MOUNT_INTELAI_MODELS_SOURCE={}\".format(mount_intelai_models), \"--env\", \"USE_CASE={}\".format(use_case), \"--env\", \"FRAMEWORK={}\".format(args.framework), \"--env\", \"NUM_CORES={}\".format(args.num_cores),", "\"MODEL_NAME={}\".format(args.model_name), \"--env\", \"MODE={}\".format(args.mode), \"--env\", \"PRECISION={}\".format(args.precision), \"--env\", \"VERBOSE={}\".format(args.verbose), \"--env\", \"BATCH_SIZE={}\".format(args.batch_size), \"--env\", \"WORKSPACE={}\".format(workspace), \"--env\", \"IN_GRAPH=/in_graph/{}\".format(in_graph_filename),", "\"BENCHMARK_SCRIPTS={}\".format(benchmark_scripts), \"--env\", \"SOCKET_ID={}\".format(args.socket_id), \"--env\", \"MODEL_NAME={}\".format(args.model_name), \"--env\", \"MODE={}\".format(args.mode), \"--env\", \"PRECISION={}\".format(args.precision), \"--env\", \"VERBOSE={}\".format(args.verbose), \"--env\", \"BATCH_SIZE={}\".format(args.batch_size),", "args first 
super(LaunchBenchmark, self).validate_args(args) # Check for spaces in docker image if '", "mount_external_models_source), \"--volume\", \"{}:{}\".format(intelai_models, mount_intelai_models), \"--volume\", \"{}:/dataset\".format(args.data_location), \"--volume\", \"{}:/checkpoints\".format(args.checkpoint), \"--volume\", \"{}:/in_graph\".format(in_graph_dir)] if mount_output_dir: volume_mounts.extend([", "Copyright (c) 2018 Intel Corporation # # Licensed under the Apache License, Version", "file except in compliance with the License. # You may obtain a copy", "docker_run_cmd.append(\"start.sh\") if args.verbose: print(\"Docker run command:\\n{}\".format(docker_run_cmd)) self._run_docker_cmd(docker_run_cmd) def _run_docker_cmd(self, docker_run_cmd): \"\"\"runs docker proc", "with the launch script arg_parser = ArgumentParser( parents=[self._common_arg_parser], description=\"Parse args for benchmark interface\")", "not os.environ.get(environment_proxy_setting): continue env_vars.append(\"--env\") env_vars.append(\"{}={}\".format( environment_proxy_setting, os.environ.get(environment_proxy_setting) )) volume_mounts = [\"--volume\", \"{}:{}\".format(benchmark_scripts, mount_benchmark),", "only use -it when debugging, otherwise we might get TTY error if args.debug:", "the path to the model's benchmarks folder search_path = os.path.join( benchmark_scripts, \"*\", args.framework,", "True def run_docker_container(self, args): \"\"\" Runs a docker container with the specified image", "License for the specific language governing permissions and # limitations under the License.", "run_docker_container(self, args): \"\"\" Runs a docker container with the specified image and environment", "find the intelai_optimized model directory optimized_model_dir = os.path.join( benchmark_scripts, os.pardir, \"models\", use_case, args.framework,", "\"--volume\", \"{}:/in_graph\".format(in_graph_dir)] if mount_output_dir: volume_mounts.extend([ \"--volume\", \"{}:{}\".format(output_dir, 
output_dir)]) docker_run_cmd = [\"docker\", \"run\"] #", "found for {} {} {}\" .format(args.framework, args.model_name, args.precision)) # use the benchmarks directory", "to in writing, software # distributed under the License is distributed on an", "\"--env\", \"BATCH_SIZE={}\".format(args.batch_size), \"--env\", \"WORKSPACE={}\".format(workspace), \"--env\", \"IN_GRAPH=/in_graph/{}\".format(in_graph_filename), \"--env\", \"MOUNT_BENCHMARK={}\".format(mount_benchmark), \"--env\", \"MOUNT_EXTERNAL_MODELS_SOURCE={}\".format(mount_external_models_source), \"--env\", \"MOUNT_INTELAI_MODELS_SOURCE={}\".format(mount_intelai_models), \"--env\",", "[\"--volume\", \"{}:{}\".format(benchmark_scripts, mount_benchmark), \"--volume\", \"{}:{}\".format(args.model_source_dir, mount_external_models_source), \"--volume\", \"{}:{}\".format(intelai_models, mount_intelai_models), \"--volume\", \"{}:/dataset\".format(args.data_location), \"--volume\", \"{}:/checkpoints\".format(args.checkpoint),", "implied. 
# See the License for the specific language governing permissions and #", "args.input_graph else \"\" env_vars = [\"--env\", \"DATASET_LOCATION_VOL={}\".format(args.data_location), \"--env\", \"CHECKPOINT_DIRECTORY_VOL={}\".format(args.checkpoint), \"--env\", \"EXTERNAL_MODELS_SOURCE_DIRECTORY={}\".format(args.model_source_dir), \"--env\", \"INTELAI_MODELS={}\".format(intelai_models),", "# by default we will install, user needs to set NOINSTALL=True # manually", "\"License\"); # you may not use this file except in compliance with the", "\"VERBOSE={}\".format(args.verbose), \"--env\", \"BATCH_SIZE={}\".format(args.batch_size), \"--env\", \"WORKSPACE={}\".format(workspace), \"--env\", \"IN_GRAPH=/in_graph/{}\".format(in_graph_filename), \"--env\", \"MOUNT_BENCHMARK={}\".format(mount_benchmark), \"--env\", \"MOUNT_EXTERNAL_MODELS_SOURCE={}\".format(mount_external_models_source), \"--env\", \"MOUNT_INTELAI_MODELS_SOURCE={}\".format(mount_intelai_models),", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "args.output_dir in_graph_dir = os.path.dirname(args.input_graph) if args.input_graph \\ else \"\" in_graph_filename = os.path.basename(args.input_graph) if", "volume_mounts + [ \"--privileged\", \"-u\", \"root:root\", \"-w\", workspace, args.docker_image, \"/bin/bash\"] if not args.debug:", "]: if not os.environ.get(environment_proxy_setting): continue env_vars.append(\"--env\") env_vars.append(\"{}={}\".format( environment_proxy_setting, os.environ.get(environment_proxy_setting) )) volume_mounts = [\"--volume\",", "validate the shared args first super(LaunchBenchmark, self).validate_args(args) # Check for spaces in docker", "we should never get more than one match raise ValueError(\"Found multiple model locations", "= ArgumentParser( parents=[self._common_arg_parser], description=\"Parse args for benchmark interface\") arg_parser.add_argument( \"--docker-image\", help=\"Specify the docker", "path use_case = 
str(dir_list[framework_index - 1]) # find the intelai_optimized model directory optimized_model_dir", "error if args.debug: docker_run_cmd.append(\"-it\") docker_run_cmd = docker_run_cmd + env_vars + volume_mounts + [", "\"--env\", \"SOCKET_ID={}\".format(args.socket_id), \"--env\", \"MODEL_NAME={}\".format(args.model_name), \"--env\", \"MODE={}\".format(args.mode), \"--env\", \"PRECISION={}\".format(args.precision), \"--env\", \"VERBOSE={}\".format(args.verbose), \"--env\", \"BATCH_SIZE={}\".format(args.batch_size), \"--env\",", "args.benchmark_only = True def run_docker_container(self, args): \"\"\" Runs a docker container with the", "# Copyright (c) 2018 Intel Corporation # # Licensed under the Apache License,", "model args in the format \" \"`name=value` but received: {}\". format(custom_arg)) env_vars.append(\"--env\") env_vars.append(\"{}\".format(custom_arg))", "\"/workspace/models\" mount_intelai_models = \"/workspace/intelai_models\" workspace = os.path.join(mount_benchmark, \"common\", args.framework) mount_output_dir = False output_dir", "by checking folder names benchmark_dir = os.path.dirname(os.path.realpath(__file__)) if glob.glob(\"{}/*/{}\".format(benchmark_dir, args.framework)) == []: raise", "\"DATASET_LOCATION=/dataset\", \"--env\", \"CHECKPOINT_DIRECTORY=/checkpoints\", \"--env\", \"BENCHMARK_ONLY={}\".format(args.benchmark_only), \"--env\", \"ACCURACY_ONLY={}\".format(args.accuracy_only), \"--env\", \"OUTPUT_RESULTS={}\".format(args.output_results), \"--env\", \"NOINSTALL=False\", \"--env\", \"OUTPUT_DIR={}\".format(output_dir)]", "the format \" \"`name=value` but received: {}\". format(custom_arg)) env_vars.append(\"--env\") env_vars.append(\"{}\".format(custom_arg)) # Add proxy", "or implied. 
# See the License for the specific language governing permissions and", "find the path to the model's benchmarks folder search_path = os.path.join( benchmark_scripts, \"*\",", "if mount_output_dir: volume_mounts.extend([ \"--volume\", \"{}:{}\".format(output_dir, output_dir)]) docker_run_cmd = [\"docker\", \"run\"] # only use", "specific language governing permissions and # limitations under the License. # # SPDX-License-Identifier:", "locations for {} {} {}\" .format(args.framework, args.model_name, args.precision)) elif len(matches) == 0: raise", "\"\"\" def main(self): args, unknown = self.parse_args(sys.argv[1:]) try: self.validate_args(args) except (IOError, ValueError) as", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "' ' in args.docker_image: raise ValueError(\"docker image string \" \"should not have whitespace(s)\")", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "raise ValueError(\"The specified framework is not supported: {}\". format(args.framework)) # if neither benchmark_only", "proc and exits on ctrl c\"\"\" p = subprocess.Popen(docker_run_cmd, preexec_fn=os.setsid) try: p.communicate() except", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "and not args.accuracy_only: args.benchmark_only = True def run_docker_container(self, args): \"\"\" Runs a docker", "\"NOINSTALL=False\", \"--env\", \"OUTPUT_DIR={}\".format(output_dir)] # by default we will install, user needs to set", "in writing, software # distributed under the License is distributed on an \"AS", "optimized_model_dir mount_benchmark = \"/workspace/benchmarks\" mount_external_models_source = \"/workspace/models\" mount_intelai_models = \"/workspace/intelai_models\" workspace = os.path.join(mount_benchmark,", "governing permissions and # limitations under the License. 
# # SPDX-License-Identifier: EPL-2.0 #", "import glob import os import signal import subprocess import sys from argparse import", "{} {}\" .format(args.framework, args.model_name, args.precision)) # use the benchmarks directory path to find", "env_vars = [\"--env\", \"DATASET_LOCATION_VOL={}\".format(args.data_location), \"--env\", \"CHECKPOINT_DIRECTORY_VOL={}\".format(args.checkpoint), \"--env\", \"EXTERNAL_MODELS_SOURCE_DIRECTORY={}\".format(args.model_source_dir), \"--env\", \"INTELAI_MODELS={}\".format(intelai_models), \"--env\", \"BENCHMARK_SCRIPTS={}\".format(benchmark_scripts), \"--env\",", "mount_external_models_source = \"/workspace/models\" mount_intelai_models = \"/workspace/intelai_models\" workspace = os.path.join(mount_benchmark, \"common\", args.framework) mount_output_dir =", "will install, user needs to set NOINSTALL=True # manually after they get into", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "ArgumentParser from common import base_benchmark_util class LaunchBenchmark(base_benchmark_util.BaseBenchmarkUtil): \"\"\"Launches benchmarking job based on the", "args that are only used with the launch script arg_parser = ArgumentParser( parents=[self._common_arg_parser],", "len(matches) > 1: # we should never get more than one match raise", "import subprocess import sys from argparse import ArgumentParser from common import base_benchmark_util class", "proxy to env variables if any set on host for environment_proxy_setting in [", "mount_intelai_models), \"--volume\", \"{}:/dataset\".format(args.data_location), \"--volume\", \"{}:/checkpoints\".format(args.checkpoint), \"--volume\", \"{}:/in_graph\".format(in_graph_dir)] if mount_output_dir: volume_mounts.extend([ \"--volume\", \"{}:{}\".format(output_dir, output_dir)])", "= os.path.join(mount_benchmark, \"common\", args.framework) mount_output_dir = False output_dir = 
os.path.join(workspace, 'logs') if args.output_dir", "user needs to set NOINSTALL=True # manually after they get into `--debug` mode", "is not supported: {}\". format(args.framework)) # if neither benchmark_only or accuracy_only are specified,", "# use the benchmarks directory path to find the use case dir_list =", "print(\"Docker run command:\\n{}\".format(docker_run_cmd)) self._run_docker_cmd(docker_run_cmd) def _run_docker_cmd(self, docker_run_cmd): \"\"\"runs docker proc and exits on", "docker container with the specified image and environment variables to start running the", "args.framework, args.model_name) # if we find an optimized model, then we will use", "-*- coding: utf-8 -*- # # Copyright (c) 2018 Intel Corporation # #", "this framework by checking folder names benchmark_dir = os.path.dirname(os.path.realpath(__file__)) if glob.glob(\"{}/*/{}\".format(benchmark_dir, args.framework)) ==", "= [\"docker\", \"run\"] # only use -it when debugging, otherwise we might get", "if any set on host for environment_proxy_setting in [ \"http_proxy\", \"ftp_proxy\", \"https_proxy\", \"no_proxy\",", "to mount log dir otherwise since default is workspace folder mount_output_dir = True", "was found for {} {} {}\" .format(args.framework, args.model_name, args.precision)) # use the benchmarks", "arg_parser.add_argument( \"--debug\", help=\"Launches debug mode which doesn't execute \" \"start.sh\", action=\"store_true\") return arg_parser.parse_known_args(args)", "\"PRECISION={}\".format(args.precision), \"--env\", \"VERBOSE={}\".format(args.verbose), \"--env\", \"BATCH_SIZE={}\".format(args.batch_size), \"--env\", \"WORKSPACE={}\".format(workspace), \"--env\", \"IN_GRAPH=/in_graph/{}\".format(in_graph_filename), \"--env\", \"MOUNT_BENCHMARK={}\".format(mount_benchmark), \"--env\", \"MOUNT_EXTERNAL_MODELS_SOURCE={}\".format(mount_external_models_source),", "env_vars.append(\"--env\") env_vars.append(\"{}\".format(custom_arg)) # Add proxy to env variables if any set on 
host", "self).define_args() # Additional args that are only used with the launch script arg_parser", "will use that path if os.path.isdir(intelai_models): intelai_models = optimized_model_dir mount_benchmark = \"/workspace/benchmarks\" mount_external_models_source", "args): \"\"\"validate the args\"\"\" # validate the shared args first super(LaunchBenchmark, self).validate_args(args) #", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "an optimized model, then we will use that path if os.path.isdir(intelai_models): intelai_models =", "folder mount_output_dir = True output_dir = args.output_dir in_graph_dir = os.path.dirname(args.input_graph) if args.input_graph \\", "after they get into `--debug` mode # since they need to run one", "super(LaunchBenchmark, self).validate_args(args) # Check for spaces in docker image if ' ' in", "\"--env\", \"INTELAI_MODELS={}\".format(intelai_models), \"--env\", \"BENCHMARK_SCRIPTS={}\".format(benchmark_scripts), \"--env\", \"SOCKET_ID={}\".format(args.socket_id), \"--env\", \"MODEL_NAME={}\".format(args.model_name), \"--env\", \"MODE={}\".format(args.mode), \"--env\", \"PRECISION={}\".format(args.precision), \"--env\",", "we will install, user needs to set NOINSTALL=True # manually after they get", "\"/workspace/intelai_models\" workspace = os.path.join(mount_benchmark, \"common\", args.framework) mount_output_dir = False output_dir = os.path.join(workspace, 'logs')", "not in custom_arg: raise ValueError(\"Expected model args in the format \" \"`name=value` but", "we don't need to mount log dir otherwise since default is workspace folder", "#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (c) 2018 Intel", "from argparse import ArgumentParser from common import base_benchmark_util class LaunchBenchmark(base_benchmark_util.BaseBenchmarkUtil): \"\"\"Launches benchmarking job", "folder search_path = os.path.join( 
benchmark_scripts, \"*\", args.framework, args.model_name, args.mode, args.precision) matches = glob.glob(search_path)", "args.framework, args.model_name, args.mode, args.precision) matches = glob.glob(search_path) if len(matches) > 1: # we", "= os.path.dirname(args.input_graph) if args.input_graph \\ else \"\" in_graph_filename = os.path.basename(args.input_graph) if \\ args.input_graph", "glob.glob(search_path) if len(matches) > 1: # we should never get more than one", "to env variables if any set on host for environment_proxy_setting in [ \"http_proxy\",", "continue env_vars.append(\"--env\") env_vars.append(\"{}={}\".format( environment_proxy_setting, os.environ.get(environment_proxy_setting) )) volume_mounts = [\"--volume\", \"{}:{}\".format(benchmark_scripts, mount_benchmark), \"--volume\", \"{}:{}\".format(args.model_source_dir,", "2018 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the", "in args.model_args: if \"=\" not in custom_arg: raise ValueError(\"Expected model args in the", "\"--volume\", \"{}:/dataset\".format(args.data_location), \"--volume\", \"{}:/checkpoints\".format(args.checkpoint), \"--volume\", \"{}:/in_graph\".format(in_graph_dir)] if mount_output_dir: volume_mounts.extend([ \"--volume\", \"{}:{}\".format(output_dir, output_dir)]) docker_run_cmd", "if args.model_name: # find the path to the model's benchmarks folder search_path =", "Add custom model args as env vars for custom_arg in args.model_args: if \"=\"", "use this file except in compliance with the License. # You may obtain", "under the License. 
# # SPDX-License-Identifier: EPL-2.0 # from __future__ import absolute_import from", "if args.verbose: print(\"Docker run command:\\n{}\".format(docker_run_cmd)) self._run_docker_cmd(docker_run_cmd) def _run_docker_cmd(self, docker_run_cmd): \"\"\"runs docker proc and", "# validate the shared args first super(LaunchBenchmark, self).validate_args(args) # Check for spaces in", "need to mount log dir otherwise since default is workspace folder mount_output_dir =", "interface\") arg_parser.add_argument( \"--docker-image\", help=\"Specify the docker image/tag to use\", dest=\"docker_image\", default=None, required=True) arg_parser.add_argument(", "doesn't execute \" \"start.sh\", action=\"store_true\") return arg_parser.parse_known_args(args) def validate_args(self, args): \"\"\"validate the args\"\"\"", "\"no_proxy\", ]: if not os.environ.get(environment_proxy_setting): continue env_vars.append(\"--env\") env_vars.append(\"{}={}\".format( environment_proxy_setting, os.environ.get(environment_proxy_setting) )) volume_mounts =", "a docker container with the specified image and environment variables to start running", "script arg_parser = ArgumentParser( parents=[self._common_arg_parser], description=\"Parse args for benchmark interface\") arg_parser.add_argument( \"--docker-image\", help=\"Specify", "needs to set NOINSTALL=True # manually after they get into `--debug` mode #", "mount_intelai_models = \"/workspace/intelai_models\" workspace = os.path.join(mount_benchmark, \"common\", args.framework) mount_output_dir = False output_dir =", "default is workspace folder mount_output_dir = True output_dir = args.output_dir in_graph_dir = os.path.dirname(args.input_graph)", "when debugging, otherwise we might get TTY error if args.debug: docker_run_cmd.append(\"-it\") docker_run_cmd =", "use that path if os.path.isdir(intelai_models): intelai_models = optimized_model_dir mount_benchmark = \"/workspace/benchmarks\" mount_external_models_source =", "= len(dir_list) - 1 - 
dir_list[::-1].index( args.framework) # grab the use case name", "time without this flag # to get stuff installed # Add custom model", "if args.debug: docker_run_cmd.append(\"-it\") docker_run_cmd = docker_run_cmd + env_vars + volume_mounts + [ \"--privileged\",", ".format(args.framework, args.model_name, args.precision)) # use the benchmarks directory path to find the use", "# if neither benchmark_only or accuracy_only are specified, then enable # benchmark_only as", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "os import signal import subprocess import sys from argparse import ArgumentParser from common", "for environment_proxy_setting in [ \"http_proxy\", \"ftp_proxy\", \"https_proxy\", \"no_proxy\", ]: if not os.environ.get(environment_proxy_setting): continue", "{}\" .format(args.framework, args.model_name, args.precision)) # use the benchmarks directory path to find the", "= glob.glob(search_path) if len(matches) > 1: # we should never get more than", "\"--env\", \"ACCURACY_ONLY={}\".format(args.accuracy_only), \"--env\", \"OUTPUT_RESULTS={}\".format(args.output_results), \"--env\", \"NOINSTALL=False\", \"--env\", \"OUTPUT_DIR={}\".format(output_dir)] # by default we will", "subprocess import sys from argparse import ArgumentParser from common import base_benchmark_util class LaunchBenchmark(base_benchmark_util.BaseBenchmarkUtil):", "\"DATASET_LOCATION_VOL={}\".format(args.data_location), \"--env\", \"CHECKPOINT_DIRECTORY_VOL={}\".format(args.checkpoint), \"--env\", \"EXTERNAL_MODELS_SOURCE_DIRECTORY={}\".format(args.model_source_dir), \"--env\", \"INTELAI_MODELS={}\".format(intelai_models), \"--env\", \"BENCHMARK_SCRIPTS={}\".format(benchmark_scripts), \"--env\", \"SOCKET_ID={}\".format(args.socket_id), \"--env\", \"MODEL_NAME={}\".format(args.model_name),", "the path use_case = str(dir_list[framework_index - 1]) # find the intelai_optimized model directory", "def run_docker_container(self, args): \"\"\" Runs a docker 
container with the specified image and", "as e: print(\"\\nError: {}\".format(e)) sys.exit(1) self.run_docker_container(args) def parse_args(self, args): super(LaunchBenchmark, self).define_args() # Additional", "{} {} {}\" .format(args.framework, args.model_name, args.precision)) # use the benchmarks directory path to", "might get TTY error if args.debug: docker_run_cmd.append(\"-it\") docker_run_cmd = docker_run_cmd + env_vars +", "environment_proxy_setting, os.environ.get(environment_proxy_setting) )) volume_mounts = [\"--volume\", \"{}:{}\".format(benchmark_scripts, mount_benchmark), \"--volume\", \"{}:{}\".format(args.model_source_dir, mount_external_models_source), \"--volume\", \"{}:{}\".format(intelai_models,", "2.0 (the \"License\"); # you may not use this file except in compliance", "\"--privileged\", \"-u\", \"root:root\", \"-w\", workspace, args.docker_image, \"/bin/bash\"] if not args.debug: docker_run_cmd.append(\"start.sh\") if args.verbose:", "def parse_args(self, args): super(LaunchBenchmark, self).define_args() # Additional args that are only used with", "variables if any set on host for environment_proxy_setting in [ \"http_proxy\", \"ftp_proxy\", \"https_proxy\",", "not supported: {}\". 
format(args.framework)) # if neither benchmark_only or accuracy_only are specified, then", "\"NUM_CORES={}\".format(args.num_cores), \"--env\", \"NUM_INTER_THREADS={}\".format(args.num_inter_threads), \"--env\", \"NUM_INTRA_THREADS={}\".format(args.num_intra_threads), \"--env\", \"DATASET_LOCATION=/dataset\", \"--env\", \"CHECKPOINT_DIRECTORY=/checkpoints\", \"--env\", \"BENCHMARK_ONLY={}\".format(args.benchmark_only), \"--env\", \"ACCURACY_ONLY={}\".format(args.accuracy_only),", "to use\", dest=\"docker_image\", default=None, required=True) arg_parser.add_argument( \"--debug\", help=\"Launches debug mode which doesn't execute", "we find an optimized model, then we will use that path if os.path.isdir(intelai_models):", "args): \"\"\" Runs a docker container with the specified image and environment variables", "benchmark_scripts, os.pardir, \"models\", use_case, args.framework, args.model_name) # if we find an optimized model,", "for the specific language governing permissions and # limitations under the License. 
#", "1: # we should never get more than one match raise ValueError(\"Found multiple", "find the use case dir_list = matches[0].split(\"/\") # find the last occurrence of", "True output_dir = args.output_dir in_graph_dir = os.path.dirname(args.input_graph) if args.input_graph \\ else \"\" in_graph_filename", "self.parse_args(sys.argv[1:]) try: self.validate_args(args) except (IOError, ValueError) as e: print(\"\\nError: {}\".format(e)) sys.exit(1) self.run_docker_container(args) def", "\"--env\", \"BENCHMARK_SCRIPTS={}\".format(benchmark_scripts), \"--env\", \"SOCKET_ID={}\".format(args.socket_id), \"--env\", \"MODEL_NAME={}\".format(args.model_name), \"--env\", \"MODE={}\".format(args.mode), \"--env\", \"PRECISION={}\".format(args.precision), \"--env\", \"VERBOSE={}\".format(args.verbose), \"--env\",", "= subprocess.Popen(docker_run_cmd, preexec_fn=os.setsid) try: p.communicate() except KeyboardInterrupt: os.killpg(os.getpgid(p.pid), signal.SIGKILL) if __name__ == \"__main__\":", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "on the specified args \"\"\" def main(self): args, unknown = self.parse_args(sys.argv[1:]) try: self.validate_args(args)", "ArgumentParser( parents=[self._common_arg_parser], description=\"Parse args for benchmark interface\") arg_parser.add_argument( \"--docker-image\", help=\"Specify the docker image/tag", "whitespace(s)\") # validate that we support this framework by checking folder names benchmark_dir", "\"--debug\", help=\"Launches debug mode which doesn't execute \" \"start.sh\", action=\"store_true\") return arg_parser.parse_known_args(args) def", "# -*- coding: utf-8 -*- # # Copyright (c) 2018 Intel Corporation #", "if not os.environ.get(environment_proxy_setting): continue env_vars.append(\"--env\") env_vars.append(\"{}={}\".format( environment_proxy_setting, os.environ.get(environment_proxy_setting) )) volume_mounts = [\"--volume\", \"{}:{}\".format(benchmark_scripts,", "import os import signal import subprocess import sys from argparse import ArgumentParser from", "\"--env\", \"NUM_INTRA_THREADS={}\".format(args.num_intra_threads), \"--env\", \"DATASET_LOCATION=/dataset\", \"--env\", \"CHECKPOINT_DIRECTORY=/checkpoints\", \"--env\", \"BENCHMARK_ONLY={}\".format(args.benchmark_only), \"--env\", \"ACCURACY_ONLY={}\".format(args.accuracy_only), \"--env\", \"OUTPUT_RESULTS={}\".format(args.output_results), \"--env\",", "we might get TTY error if args.debug: docker_run_cmd.append(\"-it\") docker_run_cmd = docker_run_cmd + env_vars", "# # Unless required by applicable law or agreed to in writing, software", "\"NUM_INTRA_THREADS={}\".format(args.num_intra_threads), \"--env\", \"DATASET_LOCATION=/dataset\", \"--env\", \"CHECKPOINT_DIRECTORY=/checkpoints\", \"--env\", \"BENCHMARK_ONLY={}\".format(args.benchmark_only), \"--env\", \"ACCURACY_ONLY={}\".format(args.accuracy_only), \"--env\", \"OUTPUT_RESULTS={}\".format(args.output_results), \"--env\", \"NOINSTALL=False\",", "express or implied. 
# See the License for the specific language governing permissions", "\"{}:{}\".format(output_dir, output_dir)]) docker_run_cmd = [\"docker\", \"run\"] # only use -it when debugging, otherwise", "if ' ' in args.docker_image: raise ValueError(\"docker image string \" \"should not have", "else \"\" env_vars = [\"--env\", \"DATASET_LOCATION_VOL={}\".format(args.data_location), \"--env\", \"CHECKPOINT_DIRECTORY_VOL={}\".format(args.checkpoint), \"--env\", \"EXTERNAL_MODELS_SOURCE_DIRECTORY={}\".format(args.model_source_dir), \"--env\", \"INTELAI_MODELS={}\".format(intelai_models), \"--env\",", "> 1: # we should never get more than one match raise ValueError(\"Found", "\"NUM_INTER_THREADS={}\".format(args.num_inter_threads), \"--env\", \"NUM_INTRA_THREADS={}\".format(args.num_intra_threads), \"--env\", \"DATASET_LOCATION=/dataset\", \"--env\", \"CHECKPOINT_DIRECTORY=/checkpoints\", \"--env\", \"BENCHMARK_ONLY={}\".format(args.benchmark_only), \"--env\", \"ACCURACY_ONLY={}\".format(args.accuracy_only), \"--env\", \"OUTPUT_RESULTS={}\".format(args.output_results),", "either express or implied. # See the License for the specific language governing", "image and environment variables to start running the benchmarking job. 
\"\"\" benchmark_scripts =", "= os.path.join( benchmark_scripts, os.pardir, \"models\", use_case, args.framework, args.model_name) # if we find an", "log dir otherwise since default is workspace folder mount_output_dir = True output_dir =", "def _run_docker_cmd(self, docker_run_cmd): \"\"\"runs docker proc and exits on ctrl c\"\"\" p =", "command:\\n{}\".format(docker_run_cmd)) self._run_docker_cmd(docker_run_cmd) def _run_docker_cmd(self, docker_run_cmd): \"\"\"runs docker proc and exits on ctrl c\"\"\"", "mount_benchmark), \"--volume\", \"{}:{}\".format(args.model_source_dir, mount_external_models_source), \"--volume\", \"{}:{}\".format(intelai_models, mount_intelai_models), \"--volume\", \"{}:/dataset\".format(args.data_location), \"--volume\", \"{}:/checkpoints\".format(args.checkpoint), \"--volume\", \"{}:/in_graph\".format(in_graph_dir)]", "format(args.framework)) # if neither benchmark_only or accuracy_only are specified, then enable # benchmark_only", "benchmarking job. \"\"\" benchmark_scripts = os.path.dirname(os.path.realpath(__file__)) intelai_models = os.path.join(benchmark_scripts, os.pardir, \"models\") if args.model_name:", "into `--debug` mode # since they need to run one time without this", "raise ValueError(\"Found multiple model locations for {} {} {}\" .format(args.framework, args.model_name, args.precision)) elif", "# SPDX-License-Identifier: EPL-2.0 # from __future__ import absolute_import from __future__ import division from", "case name from the path use_case = str(dir_list[framework_index - 1]) # find the", "\"--volume\", \"{}:{}\".format(args.model_source_dir, mount_external_models_source), \"--volume\", \"{}:{}\".format(intelai_models, mount_intelai_models), \"--volume\", \"{}:/dataset\".format(args.data_location), \"--volume\", \"{}:/checkpoints\".format(args.checkpoint), \"--volume\", \"{}:/in_graph\".format(in_graph_dir)] if", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # 
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "\"MODE={}\".format(args.mode), \"--env\", \"PRECISION={}\".format(args.precision), \"--env\", \"VERBOSE={}\".format(args.verbose), \"--env\", \"BATCH_SIZE={}\".format(args.batch_size), \"--env\", \"WORKSPACE={}\".format(workspace), \"--env\", \"IN_GRAPH=/in_graph/{}\".format(in_graph_filename), \"--env\", \"MOUNT_BENCHMARK={}\".format(mount_benchmark),", "that are only used with the launch script arg_parser = ArgumentParser( parents=[self._common_arg_parser], description=\"Parse", "0: raise ValueError(\"No model was found for {} {} {}\" .format(args.framework, args.model_name, args.precision))", "from the path use_case = str(dir_list[framework_index - 1]) # find the intelai_optimized model", "ValueError(\"docker image string \" \"should not have whitespace(s)\") # validate that we support", "\"MOUNT_INTELAI_MODELS_SOURCE={}\".format(mount_intelai_models), \"--env\", \"USE_CASE={}\".format(use_case), \"--env\", \"FRAMEWORK={}\".format(args.framework), \"--env\", \"NUM_CORES={}\".format(args.num_cores), \"--env\", \"NUM_INTER_THREADS={}\".format(args.num_inter_threads), \"--env\", \"NUM_INTRA_THREADS={}\".format(args.num_intra_threads), \"--env\", \"DATASET_LOCATION=/dataset\",", "to set NOINSTALL=True # manually after they get into `--debug` mode # since", "\"should not have whitespace(s)\") # validate that we support this framework by checking", "# find the path to the model's benchmarks folder search_path = os.path.join( benchmark_scripts,", "\"CHECKPOINT_DIRECTORY_VOL={}\".format(args.checkpoint), \"--env\", \"EXTERNAL_MODELS_SOURCE_DIRECTORY={}\".format(args.model_source_dir), \"--env\", \"INTELAI_MODELS={}\".format(intelai_models), \"--env\", \"BENCHMARK_SCRIPTS={}\".format(benchmark_scripts), \"--env\", \"SOCKET_ID={}\".format(args.socket_id), \"--env\", \"MODEL_NAME={}\".format(args.model_name), \"--env\", \"MODE={}\".format(args.mode),", "validate_args(self, args): \"\"\"validate the args\"\"\" # 
validate the shared args first super(LaunchBenchmark, self).validate_args(args)", "one match raise ValueError(\"Found multiple model locations for {} {} {}\" .format(args.framework, args.model_name,", "path if os.path.isdir(intelai_models): intelai_models = optimized_model_dir mount_benchmark = \"/workspace/benchmarks\" mount_external_models_source = \"/workspace/models\" mount_intelai_models", "ValueError(\"Expected model args in the format \" \"`name=value` but received: {}\". format(custom_arg)) env_vars.append(\"--env\")", "the list framework_index = len(dir_list) - 1 - dir_list[::-1].index( args.framework) # grab the", "# to get stuff installed # Add custom model args as env vars", "the License. # You may obtain a copy of the License at #", "is workspace folder mount_output_dir = True output_dir = args.output_dir in_graph_dir = os.path.dirname(args.input_graph) if", "\"--env\", \"NUM_CORES={}\".format(args.num_cores), \"--env\", \"NUM_INTER_THREADS={}\".format(args.num_inter_threads), \"--env\", \"NUM_INTRA_THREADS={}\".format(args.num_intra_threads), \"--env\", \"DATASET_LOCATION=/dataset\", \"--env\", \"CHECKPOINT_DIRECTORY=/checkpoints\", \"--env\", \"BENCHMARK_ONLY={}\".format(args.benchmark_only), \"--env\",", "the specified image and environment variables to start running the benchmarking job. \"\"\"", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "not args.accuracy_only: args.benchmark_only = True def run_docker_container(self, args): \"\"\" Runs a docker container", "\"root:root\", \"-w\", workspace, args.docker_image, \"/bin/bash\"] if not args.debug: docker_run_cmd.append(\"start.sh\") if args.verbose: print(\"Docker run", "# we don't need to mount log dir otherwise since default is workspace", "the License. 
# # SPDX-License-Identifier: EPL-2.0 # from __future__ import absolute_import from __future__", "are specified, then enable # benchmark_only as the default if not args.benchmark_only and", "!= \"/models/benchmarks/common/tensorflow/logs\": # we don't need to mount log dir otherwise since default", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "if not args.benchmark_only and not args.accuracy_only: args.benchmark_only = True def run_docker_container(self, args): \"\"\"", "framework_index = len(dir_list) - 1 - dir_list[::-1].index( args.framework) # grab the use case", "path to the model's benchmarks folder search_path = os.path.join( benchmark_scripts, \"*\", args.framework, args.model_name,", "'logs') if args.output_dir != \"/models/benchmarks/common/tensorflow/logs\": # we don't need to mount log dir", "docker_run_cmd.append(\"-it\") docker_run_cmd = docker_run_cmd + env_vars + volume_mounts + [ \"--privileged\", \"-u\", \"root:root\",", "args.docker_image, \"/bin/bash\"] if not args.debug: docker_run_cmd.append(\"start.sh\") if args.verbose: print(\"Docker run command:\\n{}\".format(docker_run_cmd)) self._run_docker_cmd(docker_run_cmd) def", "(c) 2018 Intel Corporation # # Licensed under the Apache License, Version 2.0", "TTY error if args.debug: docker_run_cmd.append(\"-it\") docker_run_cmd = docker_run_cmd + env_vars + volume_mounts +", "args.model_args: if \"=\" not in custom_arg: raise ValueError(\"Expected model args in the format", "[ \"http_proxy\", \"ftp_proxy\", \"https_proxy\", \"no_proxy\", ]: if not os.environ.get(environment_proxy_setting): continue env_vars.append(\"--env\") env_vars.append(\"{}={}\".format( environment_proxy_setting,", "\"run\"] # only use -it when debugging, otherwise we might get TTY error", "the benchmarking job. 
\"\"\" benchmark_scripts = os.path.dirname(os.path.realpath(__file__)) intelai_models = os.path.join(benchmark_scripts, os.pardir, \"models\") if", "from __future__ import division from __future__ import print_function import glob import os import", "from __future__ import absolute_import from __future__ import division from __future__ import print_function import", "-it when debugging, otherwise we might get TTY error if args.debug: docker_run_cmd.append(\"-it\") docker_run_cmd", "# find the intelai_optimized model directory optimized_model_dir = os.path.join( benchmark_scripts, os.pardir, \"models\", use_case,", "__future__ import print_function import glob import os import signal import subprocess import sys", "framework by checking folder names benchmark_dir = os.path.dirname(os.path.realpath(__file__)) if glob.glob(\"{}/*/{}\".format(benchmark_dir, args.framework)) == []:", "args.input_graph \\ else \"\" in_graph_filename = os.path.basename(args.input_graph) if \\ args.input_graph else \"\" env_vars", "os.environ.get(environment_proxy_setting) )) volume_mounts = [\"--volume\", \"{}:{}\".format(benchmark_scripts, mount_benchmark), \"--volume\", \"{}:{}\".format(args.model_source_dir, mount_external_models_source), \"--volume\", \"{}:{}\".format(intelai_models, mount_intelai_models),", "benchmark_scripts = os.path.dirname(os.path.realpath(__file__)) intelai_models = os.path.join(benchmark_scripts, os.pardir, \"models\") if args.model_name: # find the", "# since they need to run one time without this flag # to", "as the default if not args.benchmark_only and not args.accuracy_only: args.benchmark_only = True def", "main(self): args, unknown = self.parse_args(sys.argv[1:]) try: self.validate_args(args) except (IOError, ValueError) as e: print(\"\\nError:", "args.precision) matches = glob.glob(search_path) if len(matches) > 1: # we should never get", "os.path.join(benchmark_scripts, os.pardir, \"models\") if args.model_name: # find the path to the model's 
benchmarks", "for spaces in docker image if ' ' in args.docker_image: raise ValueError(\"docker image", "debug mode which doesn't execute \" \"start.sh\", action=\"store_true\") return arg_parser.parse_known_args(args) def validate_args(self, args):", "+ [ \"--privileged\", \"-u\", \"root:root\", \"-w\", workspace, args.docker_image, \"/bin/bash\"] if not args.debug: docker_run_cmd.append(\"start.sh\")", "output_dir = os.path.join(workspace, 'logs') if args.output_dir != \"/models/benchmarks/common/tensorflow/logs\": # we don't need to", "# Check for spaces in docker image if ' ' in args.docker_image: raise", "\"--env\", \"EXTERNAL_MODELS_SOURCE_DIRECTORY={}\".format(args.model_source_dir), \"--env\", \"INTELAI_MODELS={}\".format(intelai_models), \"--env\", \"BENCHMARK_SCRIPTS={}\".format(benchmark_scripts), \"--env\", \"SOCKET_ID={}\".format(args.socket_id), \"--env\", \"MODEL_NAME={}\".format(args.model_name), \"--env\", \"MODE={}\".format(args.mode), \"--env\",", "with the License. # You may obtain a copy of the License at", "[]: raise ValueError(\"The specified framework is not supported: {}\". 
format(args.framework)) # if neither", "use\", dest=\"docker_image\", default=None, required=True) arg_parser.add_argument( \"--debug\", help=\"Launches debug mode which doesn't execute \"", "sys from argparse import ArgumentParser from common import base_benchmark_util class LaunchBenchmark(base_benchmark_util.BaseBenchmarkUtil): \"\"\"Launches benchmarking", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "args.precision)) elif len(matches) == 0: raise ValueError(\"No model was found for {} {}", "env variables if any set on host for environment_proxy_setting in [ \"http_proxy\", \"ftp_proxy\",", "get stuff installed # Add custom model args as env vars for custom_arg", "self.validate_args(args) except (IOError, ValueError) as e: print(\"\\nError: {}\".format(e)) sys.exit(1) self.run_docker_container(args) def parse_args(self, args):", "to get stuff installed # Add custom model args as env vars for", "\"{}:/dataset\".format(args.data_location), \"--volume\", \"{}:/checkpoints\".format(args.checkpoint), \"--volume\", \"{}:/in_graph\".format(in_graph_dir)] if mount_output_dir: volume_mounts.extend([ \"--volume\", \"{}:{}\".format(output_dir, output_dir)]) docker_run_cmd =", "get more than one match raise ValueError(\"Found multiple model locations for {} {}", "output_dir)]) docker_run_cmd = [\"docker\", \"run\"] # only use -it when debugging, otherwise we", "import sys from argparse import ArgumentParser from common import base_benchmark_util class LaunchBenchmark(base_benchmark_util.BaseBenchmarkUtil): \"\"\"Launches", "specified args \"\"\" def main(self): args, unknown = self.parse_args(sys.argv[1:]) try: self.validate_args(args) except (IOError,", "glob import os import signal import subprocess import sys from argparse import ArgumentParser", "env_vars + volume_mounts + [ \"--privileged\", \"-u\", \"root:root\", \"-w\", workspace, args.docker_image, \"/bin/bash\"] if", "names benchmark_dir = 
os.path.dirname(os.path.realpath(__file__)) if glob.glob(\"{}/*/{}\".format(benchmark_dir, args.framework)) == []: raise ValueError(\"The specified framework", "law or agreed to in writing, software # distributed under the License is", "spaces in docker image if ' ' in args.docker_image: raise ValueError(\"docker image string", "not args.benchmark_only and not args.accuracy_only: args.benchmark_only = True def run_docker_container(self, args): \"\"\" Runs", "the License for the specific language governing permissions and # limitations under the", "benchmark_dir = os.path.dirname(os.path.realpath(__file__)) if glob.glob(\"{}/*/{}\".format(benchmark_dir, args.framework)) == []: raise ValueError(\"The specified framework is", "= True output_dir = args.output_dir in_graph_dir = os.path.dirname(args.input_graph) if args.input_graph \\ else \"\"", "args as env vars for custom_arg in args.model_args: if \"=\" not in custom_arg:", "class LaunchBenchmark(base_benchmark_util.BaseBenchmarkUtil): \"\"\"Launches benchmarking job based on the specified args \"\"\" def main(self):", "image if ' ' in args.docker_image: raise ValueError(\"docker image string \" \"should not", "`--debug` mode # since they need to run one time without this flag", "# if we find an optimized model, then we will use that path", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# we should never get more than one match raise ValueError(\"Found multiple model", "on host for environment_proxy_setting in [ \"http_proxy\", \"ftp_proxy\", \"https_proxy\", \"no_proxy\", ]: if not", "\"MOUNT_EXTERNAL_MODELS_SOURCE={}\".format(mount_external_models_source), \"--env\", \"MOUNT_INTELAI_MODELS_SOURCE={}\".format(mount_intelai_models), \"--env\", \"USE_CASE={}\".format(use_case), \"--env\", \"FRAMEWORK={}\".format(args.framework), \"--env\", \"NUM_CORES={}\".format(args.num_cores), \"--env\", \"NUM_INTER_THREADS={}\".format(args.num_inter_threads), \"--env\", 
\"NUM_INTRA_THREADS={}\".format(args.num_intra_threads),", "have whitespace(s)\") # validate that we support this framework by checking folder names", "str(dir_list[framework_index - 1]) # find the intelai_optimized model directory optimized_model_dir = os.path.join( benchmark_scripts,", "absolute_import from __future__ import division from __future__ import print_function import glob import os", "benchmark_only or accuracy_only are specified, then enable # benchmark_only as the default if", "import division from __future__ import print_function import glob import os import signal import", "the intelai_optimized model directory optimized_model_dir = os.path.join( benchmark_scripts, os.pardir, \"models\", use_case, args.framework, args.model_name)", "model, then we will use that path if os.path.isdir(intelai_models): intelai_models = optimized_model_dir mount_benchmark", "python # -*- coding: utf-8 -*- # # Copyright (c) 2018 Intel Corporation", "list framework_index = len(dir_list) - 1 - dir_list[::-1].index( args.framework) # grab the use", "\"start.sh\", action=\"store_true\") return arg_parser.parse_known_args(args) def validate_args(self, args): \"\"\"validate the args\"\"\" # validate the", "preexec_fn=os.setsid) try: p.communicate() except KeyboardInterrupt: os.killpg(os.getpgid(p.pid), signal.SIGKILL) if __name__ == \"__main__\": util =", "= \"/workspace/intelai_models\" workspace = os.path.join(mount_benchmark, \"common\", args.framework) mount_output_dir = False output_dir = os.path.join(workspace,", "\"models\", use_case, args.framework, args.model_name) # if we find an optimized model, then we", "ctrl c\"\"\" p = subprocess.Popen(docker_run_cmd, preexec_fn=os.setsid) try: p.communicate() except KeyboardInterrupt: os.killpg(os.getpgid(p.pid), signal.SIGKILL) if", "# # SPDX-License-Identifier: EPL-2.0 # from __future__ import absolute_import from __future__ import division", "in compliance with the License. 
# You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "import absolute_import from __future__ import division from __future__ import print_function import glob import", "with the specified image and environment variables to start running the benchmarking job.", "args.model_name: # find the path to the model's benchmarks folder search_path = os.path.join(", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "shared args first super(LaunchBenchmark, self).validate_args(args) # Check for spaces in docker image if", "first super(LaunchBenchmark, self).validate_args(args) # Check for spaces in docker image if ' '", "host for environment_proxy_setting in [ \"http_proxy\", \"ftp_proxy\", \"https_proxy\", \"no_proxy\", ]: if not os.environ.get(environment_proxy_setting):", "help=\"Specify the docker image/tag to use\", dest=\"docker_image\", default=None, required=True) arg_parser.add_argument( \"--debug\", help=\"Launches debug", "use -it when debugging, otherwise we might get TTY error if args.debug: docker_run_cmd.append(\"-it\")", "and environment variables to start running the benchmarking job. \"\"\" benchmark_scripts = os.path.dirname(os.path.realpath(__file__))", "supported: {}\". 
format(args.framework)) # if neither benchmark_only or accuracy_only are specified, then enable", "args.docker_image: raise ValueError(\"docker image string \" \"should not have whitespace(s)\") # validate that", "the docker image/tag to use\", dest=\"docker_image\", default=None, required=True) arg_parser.add_argument( \"--debug\", help=\"Launches debug mode", "\"--env\", \"MOUNT_EXTERNAL_MODELS_SOURCE={}\".format(mount_external_models_source), \"--env\", \"MOUNT_INTELAI_MODELS_SOURCE={}\".format(mount_intelai_models), \"--env\", \"USE_CASE={}\".format(use_case), \"--env\", \"FRAMEWORK={}\".format(args.framework), \"--env\", \"NUM_CORES={}\".format(args.num_cores), \"--env\", \"NUM_INTER_THREADS={}\".format(args.num_inter_threads), \"--env\",", "See the License for the specific language governing permissions and # limitations under", "\" \"start.sh\", action=\"store_true\") return arg_parser.parse_known_args(args) def validate_args(self, args): \"\"\"validate the args\"\"\" # validate", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "get into `--debug` mode # since they need to run one time without", "and # limitations under the License. 
# # SPDX-License-Identifier: EPL-2.0 # from __future__", "env_vars.append(\"{}\".format(custom_arg)) # Add proxy to env variables if any set on host for", "os.environ.get(environment_proxy_setting): continue env_vars.append(\"--env\") env_vars.append(\"{}={}\".format( environment_proxy_setting, os.environ.get(environment_proxy_setting) )) volume_mounts = [\"--volume\", \"{}:{}\".format(benchmark_scripts, mount_benchmark), \"--volume\",", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "benchmarks folder search_path = os.path.join( benchmark_scripts, \"*\", args.framework, args.model_name, args.mode, args.precision) matches =", "args.output_dir != \"/models/benchmarks/common/tensorflow/logs\": # we don't need to mount log dir otherwise since", "p.communicate() except KeyboardInterrupt: os.killpg(os.getpgid(p.pid), signal.SIGKILL) if __name__ == \"__main__\": util = LaunchBenchmark() util.main()", "NOINSTALL=True # manually after they get into `--debug` mode # since they need", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "\"--env\", \"MOUNT_INTELAI_MODELS_SOURCE={}\".format(mount_intelai_models), \"--env\", \"USE_CASE={}\".format(use_case), \"--env\", \"FRAMEWORK={}\".format(args.framework), \"--env\", \"NUM_CORES={}\".format(args.num_cores), \"--env\", \"NUM_INTER_THREADS={}\".format(args.num_inter_threads), \"--env\", \"NUM_INTRA_THREADS={}\".format(args.num_intra_threads), \"--env\",", "if neither benchmark_only or accuracy_only are specified, then enable # benchmark_only as the", "env_vars.append(\"--env\") env_vars.append(\"{}={}\".format( environment_proxy_setting, os.environ.get(environment_proxy_setting) )) volume_mounts = [\"--volume\", \"{}:{}\".format(benchmark_scripts, mount_benchmark), \"--volume\", \"{}:{}\".format(args.model_source_dir, mount_external_models_source),", "\"-u\", \"root:root\", \"-w\", workspace, args.docker_image, \"/bin/bash\"] if 
not args.debug: docker_run_cmd.append(\"start.sh\") if args.verbose: print(\"Docker", "custom model args as env vars for custom_arg in args.model_args: if \"=\" not", "LaunchBenchmark(base_benchmark_util.BaseBenchmarkUtil): \"\"\"Launches benchmarking job based on the specified args \"\"\" def main(self): args,", "accuracy_only are specified, then enable # benchmark_only as the default if not args.benchmark_only", "the launch script arg_parser = ArgumentParser( parents=[self._common_arg_parser], description=\"Parse args for benchmark interface\") arg_parser.add_argument(", "= optimized_model_dir mount_benchmark = \"/workspace/benchmarks\" mount_external_models_source = \"/workspace/models\" mount_intelai_models = \"/workspace/intelai_models\" workspace =", "raise ValueError(\"Expected model args in the format \" \"`name=value` but received: {}\". format(custom_arg))", "# # Copyright (c) 2018 Intel Corporation # # Licensed under the Apache", "= os.path.join( benchmark_scripts, \"*\", args.framework, args.model_name, args.mode, args.precision) matches = glob.glob(search_path) if len(matches)", "== 0: raise ValueError(\"No model was found for {} {} {}\" .format(args.framework, args.model_name,", "multiple model locations for {} {} {}\" .format(args.framework, args.model_name, args.precision)) elif len(matches) ==", "\"MOUNT_BENCHMARK={}\".format(mount_benchmark), \"--env\", \"MOUNT_EXTERNAL_MODELS_SOURCE={}\".format(mount_external_models_source), \"--env\", \"MOUNT_INTELAI_MODELS_SOURCE={}\".format(mount_intelai_models), \"--env\", \"USE_CASE={}\".format(use_case), \"--env\", \"FRAMEWORK={}\".format(args.framework), \"--env\", \"NUM_CORES={}\".format(args.num_cores), \"--env\", \"NUM_INTER_THREADS={}\".format(args.num_inter_threads),", "= args.output_dir in_graph_dir = os.path.dirname(args.input_graph) if args.input_graph \\ else \"\" in_graph_filename = os.path.basename(args.input_graph)", "args.benchmark_only and not args.accuracy_only: args.benchmark_only = True 
def run_docker_container(self, args): \"\"\" Runs a", "\"INTELAI_MODELS={}\".format(intelai_models), \"--env\", \"BENCHMARK_SCRIPTS={}\".format(benchmark_scripts), \"--env\", \"SOCKET_ID={}\".format(args.socket_id), \"--env\", \"MODEL_NAME={}\".format(args.model_name), \"--env\", \"MODE={}\".format(args.mode), \"--env\", \"PRECISION={}\".format(args.precision), \"--env\", \"VERBOSE={}\".format(args.verbose),", "\"OUTPUT_RESULTS={}\".format(args.output_results), \"--env\", \"NOINSTALL=False\", \"--env\", \"OUTPUT_DIR={}\".format(output_dir)] # by default we will install, user needs", "but received: {}\". format(custom_arg)) env_vars.append(\"--env\") env_vars.append(\"{}\".format(custom_arg)) # Add proxy to env variables if", "args \"\"\" def main(self): args, unknown = self.parse_args(sys.argv[1:]) try: self.validate_args(args) except (IOError, ValueError)", "__future__ import division from __future__ import print_function import glob import os import signal", "dir_list = matches[0].split(\"/\") # find the last occurrence of framework in the list", "\"--docker-image\", help=\"Specify the docker image/tag to use\", dest=\"docker_image\", default=None, required=True) arg_parser.add_argument( \"--debug\", help=\"Launches", "[\"docker\", \"run\"] # only use -it when debugging, otherwise we might get TTY", "Runs a docker container with the specified image and environment variables to start", "benchmark_scripts, \"*\", args.framework, args.model_name, args.mode, args.precision) matches = glob.glob(search_path) if len(matches) > 1:", "based on the specified args \"\"\" def main(self): args, unknown = self.parse_args(sys.argv[1:]) try:", "matches[0].split(\"/\") # find the last occurrence of framework in the list framework_index =", "format(custom_arg)) env_vars.append(\"--env\") env_vars.append(\"{}\".format(custom_arg)) # Add proxy to env variables if any set on", "os.path.join(workspace, 'logs') if args.output_dir != \"/models/benchmarks/common/tensorflow/logs\": # we 
don't need to mount log", "\"--env\", \"FRAMEWORK={}\".format(args.framework), \"--env\", \"NUM_CORES={}\".format(args.num_cores), \"--env\", \"NUM_INTER_THREADS={}\".format(args.num_inter_threads), \"--env\", \"NUM_INTRA_THREADS={}\".format(args.num_intra_threads), \"--env\", \"DATASET_LOCATION=/dataset\", \"--env\", \"CHECKPOINT_DIRECTORY=/checkpoints\", \"--env\",", "os.path.dirname(args.input_graph) if args.input_graph \\ else \"\" in_graph_filename = os.path.basename(args.input_graph) if \\ args.input_graph else", "Version 2.0 (the \"License\"); # you may not use this file except in", "except in compliance with the License. # You may obtain a copy of", "[\"--env\", \"DATASET_LOCATION_VOL={}\".format(args.data_location), \"--env\", \"CHECKPOINT_DIRECTORY_VOL={}\".format(args.checkpoint), \"--env\", \"EXTERNAL_MODELS_SOURCE_DIRECTORY={}\".format(args.model_source_dir), \"--env\", \"INTELAI_MODELS={}\".format(intelai_models), \"--env\", \"BENCHMARK_SCRIPTS={}\".format(benchmark_scripts), \"--env\", \"SOCKET_ID={}\".format(args.socket_id), \"--env\",", "model locations for {} {} {}\" .format(args.framework, args.model_name, args.precision)) elif len(matches) == 0:", "default if not args.benchmark_only and not args.accuracy_only: args.benchmark_only = True def run_docker_container(self, args):", "- 1 - dir_list[::-1].index( args.framework) # grab the use case name from the", "if args.input_graph \\ else \"\" in_graph_filename = os.path.basename(args.input_graph) if \\ args.input_graph else \"\"", "use case dir_list = matches[0].split(\"/\") # find the last occurrence of framework in", "we support this framework by checking folder names benchmark_dir = os.path.dirname(os.path.realpath(__file__)) if glob.glob(\"{}/*/{}\".format(benchmark_dir,", "the args\"\"\" # validate the shared args first super(LaunchBenchmark, self).validate_args(args) # Check for", "that path if os.path.isdir(intelai_models): intelai_models = optimized_model_dir mount_benchmark = 
\"/workspace/benchmarks\" mount_external_models_source = \"/workspace/models\"", "benchmarks directory path to find the use case dir_list = matches[0].split(\"/\") # find", "workspace, args.docker_image, \"/bin/bash\"] if not args.debug: docker_run_cmd.append(\"start.sh\") if args.verbose: print(\"Docker run command:\\n{}\".format(docker_run_cmd)) self._run_docker_cmd(docker_run_cmd)", "\"/models/benchmarks/common/tensorflow/logs\": # we don't need to mount log dir otherwise since default is", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "# from __future__ import absolute_import from __future__ import division from __future__ import print_function", "super(LaunchBenchmark, self).define_args() # Additional args that are only used with the launch script", "\"--volume\", \"{}:{}\".format(output_dir, output_dir)]) docker_run_cmd = [\"docker\", \"run\"] # only use -it when debugging,", "EPL-2.0 # from __future__ import absolute_import from __future__ import division from __future__ import", "os.pardir, \"models\", use_case, args.framework, args.model_name) # if we find an optimized model, then", "which doesn't execute \" \"start.sh\", action=\"store_true\") return arg_parser.parse_known_args(args) def validate_args(self, args): \"\"\"validate the", "args.framework)) == []: raise ValueError(\"The specified framework is not supported: {}\". 
format(args.framework)) #", "self.run_docker_container(args) def parse_args(self, args): super(LaunchBenchmark, self).define_args() # Additional args that are only used", "for {} {} {}\" .format(args.framework, args.model_name, args.precision)) # use the benchmarks directory path", "\"USE_CASE={}\".format(use_case), \"--env\", \"FRAMEWORK={}\".format(args.framework), \"--env\", \"NUM_CORES={}\".format(args.num_cores), \"--env\", \"NUM_INTER_THREADS={}\".format(args.num_inter_threads), \"--env\", \"NUM_INTRA_THREADS={}\".format(args.num_intra_threads), \"--env\", \"DATASET_LOCATION=/dataset\", \"--env\", \"CHECKPOINT_DIRECTORY=/checkpoints\",", "\"OUTPUT_DIR={}\".format(output_dir)] # by default we will install, user needs to set NOINSTALL=True #", "by default we will install, user needs to set NOINSTALL=True # manually after", "for custom_arg in args.model_args: if \"=\" not in custom_arg: raise ValueError(\"Expected model args", "try: p.communicate() except KeyboardInterrupt: os.killpg(os.getpgid(p.pid), signal.SIGKILL) if __name__ == \"__main__\": util = LaunchBenchmark()", "and exits on ctrl c\"\"\" p = subprocess.Popen(docker_run_cmd, preexec_fn=os.setsid) try: p.communicate() except KeyboardInterrupt:", "job. 
\"\"\" benchmark_scripts = os.path.dirname(os.path.realpath(__file__)) intelai_models = os.path.join(benchmark_scripts, os.pardir, \"models\") if args.model_name: #", "launch script arg_parser = ArgumentParser( parents=[self._common_arg_parser], description=\"Parse args for benchmark interface\") arg_parser.add_argument( \"--docker-image\",", "Intel Corporation # # Licensed under the Apache License, Version 2.0 (the \"License\");", "self._run_docker_cmd(docker_run_cmd) def _run_docker_cmd(self, docker_run_cmd): \"\"\"runs docker proc and exits on ctrl c\"\"\" p", "{} {} {}\" .format(args.framework, args.model_name, args.precision)) elif len(matches) == 0: raise ValueError(\"No model", "we will use that path if os.path.isdir(intelai_models): intelai_models = optimized_model_dir mount_benchmark = \"/workspace/benchmarks\"", "then we will use that path if os.path.isdir(intelai_models): intelai_models = optimized_model_dir mount_benchmark =", "model args as env vars for custom_arg in args.model_args: if \"=\" not in", "else \"\" in_graph_filename = os.path.basename(args.input_graph) if \\ args.input_graph else \"\" env_vars = [\"--env\",", "\"CHECKPOINT_DIRECTORY=/checkpoints\", \"--env\", \"BENCHMARK_ONLY={}\".format(args.benchmark_only), \"--env\", \"ACCURACY_ONLY={}\".format(args.accuracy_only), \"--env\", \"OUTPUT_RESULTS={}\".format(args.output_results), \"--env\", \"NOINSTALL=False\", \"--env\", \"OUTPUT_DIR={}\".format(output_dir)] # by", "dir_list[::-1].index( args.framework) # grab the use case name from the path use_case =", "# manually after they get into `--debug` mode # since they need to", "import ArgumentParser from common import base_benchmark_util class LaunchBenchmark(base_benchmark_util.BaseBenchmarkUtil): \"\"\"Launches benchmarking job based on", "Add proxy to env variables if any set on host for environment_proxy_setting in", "than one match raise ValueError(\"Found multiple model locations for {} {} {}\" .format(args.framework,", "workspace = 
os.path.join(mount_benchmark, \"common\", args.framework) mount_output_dir = False output_dir = os.path.join(workspace, 'logs') if", "\"BENCHMARK_ONLY={}\".format(args.benchmark_only), \"--env\", \"ACCURACY_ONLY={}\".format(args.accuracy_only), \"--env\", \"OUTPUT_RESULTS={}\".format(args.output_results), \"--env\", \"NOINSTALL=False\", \"--env\", \"OUTPUT_DIR={}\".format(output_dir)] # by default we", "\"http_proxy\", \"ftp_proxy\", \"https_proxy\", \"no_proxy\", ]: if not os.environ.get(environment_proxy_setting): continue env_vars.append(\"--env\") env_vars.append(\"{}={}\".format( environment_proxy_setting, os.environ.get(environment_proxy_setting)", "intelai_optimized model directory optimized_model_dir = os.path.join( benchmark_scripts, os.pardir, \"models\", use_case, args.framework, args.model_name) #", "docker proc and exits on ctrl c\"\"\" p = subprocess.Popen(docker_run_cmd, preexec_fn=os.setsid) try: p.communicate()", "the specific language governing permissions and # limitations under the License. 
# #", "\"models\") if args.model_name: # find the path to the model's benchmarks folder search_path", "this flag # to get stuff installed # Add custom model args as", "\"-w\", workspace, args.docker_image, \"/bin/bash\"] if not args.debug: docker_run_cmd.append(\"start.sh\") if args.verbose: print(\"Docker run command:\\n{}\".format(docker_run_cmd))", "for benchmark interface\") arg_parser.add_argument( \"--docker-image\", help=\"Specify the docker image/tag to use\", dest=\"docker_image\", default=None,", "from __future__ import print_function import glob import os import signal import subprocess import", "unknown = self.parse_args(sys.argv[1:]) try: self.validate_args(args) except (IOError, ValueError) as e: print(\"\\nError: {}\".format(e)) sys.exit(1)", "= True def run_docker_container(self, args): \"\"\" Runs a docker container with the specified", "if args.output_dir != \"/models/benchmarks/common/tensorflow/logs\": # we don't need to mount log dir otherwise", "+ env_vars + volume_mounts + [ \"--privileged\", \"-u\", \"root:root\", \"-w\", workspace, args.docker_image, \"/bin/bash\"]", "not args.debug: docker_run_cmd.append(\"start.sh\") if args.verbose: print(\"Docker run command:\\n{}\".format(docker_run_cmd)) self._run_docker_cmd(docker_run_cmd) def _run_docker_cmd(self, docker_run_cmd): \"\"\"runs", "# only use -it when debugging, otherwise we might get TTY error if", "don't need to mount log dir otherwise since default is workspace folder mount_output_dir", "docker_run_cmd + env_vars + volume_mounts + [ \"--privileged\", \"-u\", \"root:root\", \"-w\", workspace, args.docker_image,", "without this flag # to get stuff installed # Add custom model args", "Additional args that are only used with the launch script arg_parser = ArgumentParser(", "use_case, args.framework, args.model_name) # if we find an optimized model, then we will", "\"ftp_proxy\", \"https_proxy\", \"no_proxy\", ]: if not os.environ.get(environment_proxy_setting): continue 
env_vars.append(\"--env\") env_vars.append(\"{}={}\".format( environment_proxy_setting, os.environ.get(environment_proxy_setting) ))", "os.path.join(mount_benchmark, \"common\", args.framework) mount_output_dir = False output_dir = os.path.join(workspace, 'logs') if args.output_dir !=", "import signal import subprocess import sys from argparse import ArgumentParser from common import", "{}\" .format(args.framework, args.model_name, args.precision)) elif len(matches) == 0: raise ValueError(\"No model was found", "ValueError(\"Found multiple model locations for {} {} {}\" .format(args.framework, args.model_name, args.precision)) elif len(matches)", "- 1]) # find the intelai_optimized model directory optimized_model_dir = os.path.join( benchmark_scripts, os.pardir,", "docker image/tag to use\", dest=\"docker_image\", default=None, required=True) arg_parser.add_argument( \"--debug\", help=\"Launches debug mode which", "default=None, required=True) arg_parser.add_argument( \"--debug\", help=\"Launches debug mode which doesn't execute \" \"start.sh\", action=\"store_true\")", "' in args.docker_image: raise ValueError(\"docker image string \" \"should not have whitespace(s)\") #", "print(\"\\nError: {}\".format(e)) sys.exit(1) self.run_docker_container(args) def parse_args(self, args): super(LaunchBenchmark, self).define_args() # Additional args that", "# Additional args that are only used with the launch script arg_parser =", "any set on host for environment_proxy_setting in [ \"http_proxy\", \"ftp_proxy\", \"https_proxy\", \"no_proxy\", ]:", "args.debug: docker_run_cmd.append(\"start.sh\") if args.verbose: print(\"Docker run command:\\n{}\".format(docker_run_cmd)) self._run_docker_cmd(docker_run_cmd) def _run_docker_cmd(self, docker_run_cmd): \"\"\"runs docker", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "variables to start running the benchmarking job. 
\"\"\" benchmark_scripts = os.path.dirname(os.path.realpath(__file__)) intelai_models =", "1]) # find the intelai_optimized model directory optimized_model_dir = os.path.join( benchmark_scripts, os.pardir, \"models\",", "return arg_parser.parse_known_args(args) def validate_args(self, args): \"\"\"validate the args\"\"\" # validate the shared args", "manually after they get into `--debug` mode # since they need to run", "\"=\" not in custom_arg: raise ValueError(\"Expected model args in the format \" \"`name=value`", "c\"\"\" p = subprocess.Popen(docker_run_cmd, preexec_fn=os.setsid) try: p.communicate() except KeyboardInterrupt: os.killpg(os.getpgid(p.pid), signal.SIGKILL) if __name__", "if glob.glob(\"{}/*/{}\".format(benchmark_dir, args.framework)) == []: raise ValueError(\"The specified framework is not supported: {}\".", "checking folder names benchmark_dir = os.path.dirname(os.path.realpath(__file__)) if glob.glob(\"{}/*/{}\".format(benchmark_dir, args.framework)) == []: raise ValueError(\"The", "or accuracy_only are specified, then enable # benchmark_only as the default if not" ]
[ "\"\"\" pass def test_road_segmenter_list(self): \"\"\" The road_segmenter function should return a list :return:", "\"\"\" for road in self.road_net: road = convert(road) self.assertIsInstance(road[\"the_geom\"], dict, \"geometry_to_list should return", ":return: Nothing \"\"\" margin = 3 for road in self.road_net: road = convert(road)", "road was segmented, but should not have been.\") elif road_coords_length >= 2*self.min_coordinates_length and", "= self.road_net_segmented[x][\"the_geom\"] self.assertNotEqual(road, other_road, \"Duplicate segment geometry coordinates\") def test_missing_coordinates(self): \"\"\" All original", "class TestSegmenting(unittest.TestCase): @classmethod def setUpClass(cls): cls.kommune = 5001 cls.vegref = \"kg\" cls.max_segment_distance =", "of roads segments, the split segments should always have a length of 2", "be similar to the length before segmentation, within a margin given by the", "from apps.api.segmenter.road_segmenter import geometry_to_list from apps.data.road_segmenting.road_fetcher import vegnet_to_geojson from apps.data.road_segmenting.road_filter import filter_road from", "vegnet_to_geojson from apps.data.road_segmenting.road_filter import filter_road from vapi.constants import MAX_SEGMENT_LENGTH, MIN_COORDINATES_LENGTH from api.segmenter.calculate_distance import", "road_segmented_length = len(road_segmented) if road_distance < self.max_segment_distance: self.assertTrue(road_segmented_length == 1, \"This road was", "the setUpClass is a bit funky and the road_net does not stay filtered", "self.assertEqual(curr_segment[\"the_geom\"][\"coordinates\"][0], prev_segment[\"the_geom\"][\"coordinates\"][end_coordinate], \"Segments are not correctly chained\") def test_split_segment_negative_length(self): \"\"\" No road segments", "@classmethod def setUpClass(cls): cls.kommune = 5001 cls.vegref = \"kg\" cls.max_segment_distance = MAX_SEGMENT_LENGTH cls.min_coordinates_length", "of here. 
road_net_list = [] for road in cls.road_net: road_net_list.append(filter_road(road)) cls.road_net_segmented = segment_network(road_net_list,", "def test_road_segmenter_list(self): \"\"\" The road_segmenter function should return a list :return: Nothing \"\"\"", "segmentation, within a margin given by the variable \"margin\" :return: Nothing \"\"\" margin", "= segment[\"the_geom\"].count(\",\") self.assertGreaterEqual(coordinates_amount+1, self.min_coordinates_length, error_message) def test_road_filter(self): \"\"\" The road_filter function should return", "= network[0], network[1][\"features\"] # Apparently the setUpClass is a bit funky and the", "shorter than that. In other words the segmented road should still be only", "be present after segmenting road network :return: Nothing \"\"\" for road in self.road_net:", "\"\"\" Needs to be here for the tests to run \"\"\" pass def", "\"Segment has less than \" + str(self.min_coordinates_length) + \" GPS coordinates\" for segment", "a dictionary containing coordinates as a list, otherwise the segmenter can't split segments", "# so instead it is done in each test function it is needed", "always have a length of 2 or more :return: Nothing \"\"\" error_message =", "be the same as the count of coordinates coordinates_amount = segment[\"the_geom\"].count(\",\") self.assertGreaterEqual(coordinates_amount+1, self.min_coordinates_length,", "segmenting\") def test_over_and_undersegmenting(self): \"\"\" The segmenter should only run on segments that are", "should only run on segments that are over the limit in length, it", "Nothing \"\"\" for segment in self.road_net_segmented: self.assertGreater(segment[\"stretchdistance\"], 0, \"Stretchdistance must be of at", "a string, otherwise segmentation will crash in later stages :return: Nothing \"\"\" for", "road = convert(road) coordinates_original = road[\"the_geom\"][\"coordinates\"] road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) 
coordinates_segmented", "segments should always have a length of 2 or more :return: Nothing \"\"\"", "run on segments that are over the limit in length, it should never", "list, \"The road segmenter did not return a list\") def test_road_segmenter_list_elements(self): \"\"\" Every", "total distance of the segmented road should be similar to the length before", "here for the tests to run \"\"\" pass def test_road_segmenter_list(self): \"\"\" The road_segmenter", "is needed instead of here. road_net_list = [] for road in cls.road_net: road_net_list.append(filter_road(road))", "and the segmented length is \" \"too large\") def test_split_segment_chaining(self): \"\"\" Every connected", "the variable \"margin\" :return: Nothing \"\"\" margin = 3 for road in self.road_net:", "filter_road(road) self.assertIsInstance(road[\"the_geom\"], str, \"road_filter should turn geometry into a string\") def test_geometry_conversion(self): \"\"\"", "other_road, \"Duplicate segment geometry coordinates\") def test_missing_coordinates(self): \"\"\" All original coordinates should still", "import geometry_to_list from apps.data.road_segmenting.road_fetcher import vegnet_to_geojson from apps.data.road_segmenting.road_filter import filter_road from vapi.constants import", "= 0 for segment in road_segmented: length_after += calculate_road_length_simple(segment[\"the_geom\"][\"coordinates\"]) self.assertLess(abs(length_after - length_before), margin,", "[] for segment in road_segmented: coordinates_segmented.extend(segment[\"the_geom\"][\"coordinates\"]) for coordinate in coordinates_original: self.assertTrue(coordinate in coordinates_segmented,", "string\") def test_geometry_conversion(self): \"\"\" The geometry_to_list function should return a dictionary containing coordinates", "test_over_and_undersegmenting(self): \"\"\" The segmenter should only run on segments that are over the", "does not stay filtered after setUpClass is run, # so instead it is", "\"\"\" 
self.assertIsInstance(self.road_net_segmented, list, \"The road segmenter did not return a list\") def test_road_segmenter_list_elements(self):", "road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) coordinates_segmented = [] for segment in road_segmented:", "test_road_segmenter_list(self): \"\"\" The road_segmenter function should return a list :return: Nothing \"\"\" self.assertIsInstance(self.road_net_segmented,", "segments with the same coordinates \"\"\" length = len(self.road_net_segmented)-1 for i in range(length):", "[], self.min_coordinates_length) length_after = 0 for segment in road_segmented: length_after += calculate_road_length_simple(segment[\"the_geom\"][\"coordinates\"]) self.assertLess(abs(length_after", "= convert(road) road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) for i in range(1, len(road_segmented)):", "dict, \"geometry_to_list should return a \" \"dictionary\") self.assertIsInstance(road[\"the_geom\"][\"coordinates\"], list, \"geometry_to_list should return a", "return a dictionary containing coordinates as a list, otherwise the segmenter can't split", "error_message = \"Not all elements in the split list are of type dict", "here. road_net_list = [] for road in cls.road_net: road_net_list.append(filter_road(road)) cls.road_net_segmented = segment_network(road_net_list, cls.max_segment_distance,", "that. 
In other words the segmented road should still be only one segment", "Nothing \"\"\" i = 0 for road in self.road_net: i += 1 converted_road", "= convert(road) length_before = calculate_road_length_simple(road[\"the_geom\"][\"coordinates\"]) road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) length_after =", "variable \"margin\" :return: Nothing \"\"\" margin = 3 for road in self.road_net: road", "of the segmented road should be similar to the length before segmentation, within", "for road in self.road_net: i += 1 converted_road = convert(road) road_coords_length = len(converted_road[\"the_geom\"][\"coordinates\"])", "same coordinates \"\"\" length = len(self.road_net_segmented)-1 for i in range(length): road = self.road_net_segmented[i][\"the_geom\"]", "test_duplicate_segments(self): \"\"\" Test if there are multiple segments with the same coordinates \"\"\"", "self.road_net: road = convert(road) self.assertIsInstance(road[\"the_geom\"], dict, \"geometry_to_list should return a \" \"dictionary\") self.assertIsInstance(road[\"the_geom\"][\"coordinates\"],", "in cls.road_net: road_net_list.append(filter_road(road)) cls.road_net_segmented = segment_network(road_net_list, cls.max_segment_distance, cls.min_coordinates_length) def setUp(self): \"\"\" Needs to", "i in range(1, len(road_segmented)): curr_segment = road_segmented[i] prev_segment = road_segmented[i-1] end_coordinate = len(prev_segment[\"the_geom\"][\"coordinates\"])-1", "error_message) def test_split_segment_geometry_len(self): \"\"\" Given a list of roads segments, the split segments", "segmenting road network :return: Nothing \"\"\" for road in self.road_net: road = convert(road)", "import vegnet_to_geojson from apps.data.road_segmenting.road_filter import filter_road from vapi.constants import MAX_SEGMENT_LENGTH, MIN_COORDINATES_LENGTH from api.segmenter.calculate_distance", "have a negative road length :return: Nothing \"\"\" for segment in 
self.road_net_segmented: self.assertGreater(segment[\"stretchdistance\"],", "in range(i+1, length): other_road = self.road_net_segmented[x][\"the_geom\"] self.assertNotEqual(road, other_road, \"Duplicate segment geometry coordinates\") def", "def test_over_and_undersegmenting(self): \"\"\" The segmenter should only run on segments that are over", "str, \"road_filter should turn geometry into a string\") def test_geometry_conversion(self): \"\"\" The geometry_to_list", "apps.data.road_segmenting.road_filter import filter_road from vapi.constants import MAX_SEGMENT_LENGTH, MIN_COORDINATES_LENGTH from api.segmenter.calculate_distance import calculate_road_length_simple from", "\"\"\" Test if there are multiple segments with the same coordinates \"\"\" length", "\" \"too large\") def test_split_segment_chaining(self): \"\"\" Every connected segment should start with the", "for road in self.road_net: road = convert(road) road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length)", "coordinates_segmented, \"Missing coordinate after segmenting\") def test_over_and_undersegmenting(self): \"\"\" The segmenter should only run", "and road_distance > self.max_segment_distance: self.assertTrue(road_segmented_length > 1, (\"This road should have been segmented,", "prev_segment = road_segmented[i-1] end_coordinate = len(prev_segment[\"the_geom\"][\"coordinates\"])-1 self.assertEqual(curr_segment[\"the_geom\"][\"coordinates\"][0], prev_segment[\"the_geom\"][\"coordinates\"][end_coordinate], \"Segments are not correctly chained\")", "be of at least 1 meter\") def test_duplicate_segments(self): \"\"\" Test if there are", "for x in range(i+1, length): other_road = self.road_net_segmented[x][\"the_geom\"] self.assertNotEqual(road, other_road, \"Duplicate segment geometry", "into a list\") def test_calculate_road_length(self): \"\"\" The total distance of the segmented road", "is done in each test function it is needed instead of here. 
road_net_list", "so instead it is done in each test function it is needed instead", "cls.count, cls.road_net = network[0], network[1][\"features\"] # Apparently the setUpClass is a bit funky", "segment_network, split_segment def convert(road): road = filter_road(road) road[\"the_geom\"] = geometry_to_list(road[\"the_geom\"]) return road class", "segments :return: Nothing \"\"\" for road in self.road_net: road = convert(road) self.assertIsInstance(road[\"the_geom\"], dict,", "end gps point of the previous segment :return: Nothing \"\"\" for road in", "\"Stretchdistance must be of at least 1 meter\") def test_duplicate_segments(self): \"\"\" Test if", "self.road_net: road = convert(road) coordinates_original = road[\"the_geom\"][\"coordinates\"] road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length)", "road length :return: Nothing \"\"\" for segment in self.road_net_segmented: self.assertGreater(segment[\"stretchdistance\"], 0, \"Stretchdistance must", "coordinate in coordinates_original: self.assertTrue(coordinate in coordinates_segmented, \"Missing coordinate after segmenting\") def test_over_and_undersegmenting(self): \"\"\"", "split segments should always have a length of 2 or more :return: Nothing", "from apps.data.road_segmenting.road_fetcher import vegnet_to_geojson from apps.data.road_segmenting.road_filter import filter_road from vapi.constants import MAX_SEGMENT_LENGTH, MIN_COORDINATES_LENGTH", "= 3 for road in self.road_net: road = convert(road) length_before = calculate_road_length_simple(road[\"the_geom\"][\"coordinates\"]) road_segmented", "= MAX_SEGMENT_LENGTH cls.min_coordinates_length = MIN_COORDINATES_LENGTH network = vegnet_to_geojson(cls.kommune, cls.vegref) cls.count, cls.road_net = network[0],", "self.road_net_segmented: self.assertIsInstance(segment, dict, error_message) def test_split_segment_geometry_len(self): \"\"\" Given a list of roads segments,", "only one segment :return: Nothing \"\"\" i = 0 
for road in self.road_net:", "1 converted_road = convert(road) road_coords_length = len(converted_road[\"the_geom\"][\"coordinates\"]) road_distance = calculate_road_length_simple(converted_road[\"the_geom\"][\"coordinates\"]) road_segmented = segment_network([filter_road(road)],", "list\") def test_calculate_road_length(self): \"\"\" The total distance of the segmented road should be", "segment in self.road_net_segmented: self.assertGreater(segment[\"stretchdistance\"], 0, \"Stretchdistance must be of at least 1 meter\")", "run \"\"\" pass def test_road_segmenter_list(self): \"\"\" The road_segmenter function should return a list", "of commas+1 should be the same as the count of coordinates coordinates_amount =", "same as the count of coordinates coordinates_amount = segment[\"the_geom\"].count(\",\") self.assertGreaterEqual(coordinates_amount+1, self.min_coordinates_length, error_message) def", "- length_before), margin, \"The difference between the original \" \"length and the segmented", "= road_segmented[i-1] end_coordinate = len(prev_segment[\"the_geom\"][\"coordinates\"])-1 self.assertEqual(curr_segment[\"the_geom\"][\"coordinates\"][0], prev_segment[\"the_geom\"][\"coordinates\"][end_coordinate], \"Segments are not correctly chained\") def", "The total distance of the segmented road should be similar to the length", "length of 2 or more :return: Nothing \"\"\" error_message = \"Segment has less", "function should return a string, otherwise segmentation will crash in later stages :return:", "3 for road in self.road_net: road = convert(road) length_before = calculate_road_length_simple(road[\"the_geom\"][\"coordinates\"]) road_segmented =", "\"coordinates into a list\") def test_calculate_road_length(self): \"\"\" The total distance of the segmented", "= split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) coordinates_segmented = [] for segment in road_segmented: coordinates_segmented.extend(segment[\"the_geom\"][\"coordinates\"])", 
"by the variable \"margin\" :return: Nothing \"\"\" margin = 3 for road in", "import unittest from apps.api.segmenter.road_segmenter import geometry_to_list from apps.data.road_segmenting.road_fetcher import vegnet_to_geojson from apps.data.road_segmenting.road_filter import", "be a dict :return: Nothing \"\"\" error_message = \"Not all elements in the", "a string\") def test_geometry_conversion(self): \"\"\" The geometry_to_list function should return a dictionary containing", "it is needed instead of here. road_net_list = [] for road in cls.road_net:", "def test_road_segmenter_list_elements(self): \"\"\" Every element in the split segments should be a dict", "road_coords_length >= 2*self.min_coordinates_length and road_distance > self.max_segment_distance: self.assertTrue(road_segmented_length > 1, (\"This road should", ">= 2*self.min_coordinates_length and road_distance > self.max_segment_distance: self.assertTrue(road_segmented_length > 1, (\"This road should have", "\"\"\" The geometry_to_list function should return a dictionary containing coordinates as a list,", "stages :return: Nothing \"\"\" for road in self.road_net: road = filter_road(road) self.assertIsInstance(road[\"the_geom\"], str,", "test function it is needed instead of here. 
road_net_list = [] for road", "type dict \\n\" for segment in self.road_net_segmented: self.assertIsInstance(segment, dict, error_message) def test_split_segment_geometry_len(self): \"\"\"", "length is \" \"too large\") def test_split_segment_chaining(self): \"\"\" Every connected segment should start", "def test_duplicate_segments(self): \"\"\" Test if there are multiple segments with the same coordinates", "= geometry_to_list(road[\"the_geom\"]) return road class TestSegmenting(unittest.TestCase): @classmethod def setUpClass(cls): cls.kommune = 5001 cls.vegref", "MAX_SEGMENT_LENGTH, MIN_COORDINATES_LENGTH from api.segmenter.calculate_distance import calculate_road_length_simple from api.segmenter.road_segmenter import segment_network, split_segment def convert(road):", "crash in later stages :return: Nothing \"\"\" for road in self.road_net: road =", "segment_network(road_net_list, cls.max_segment_distance, cls.min_coordinates_length) def setUp(self): \"\"\" Needs to be here for the tests", "[] for road in cls.road_net: road_net_list.append(filter_road(road)) cls.road_net_segmented = segment_network(road_net_list, cls.max_segment_distance, cls.min_coordinates_length) def setUp(self):", "on segments that are over the limit in length, it should never segment", "segmenter can't split segments :return: Nothing \"\"\" for road in self.road_net: road =", "as the count of coordinates coordinates_amount = segment[\"the_geom\"].count(\",\") self.assertGreaterEqual(coordinates_amount+1, self.min_coordinates_length, error_message) def test_road_filter(self):", "from apps.data.road_segmenting.road_filter import filter_road from vapi.constants import MAX_SEGMENT_LENGTH, MIN_COORDINATES_LENGTH from api.segmenter.calculate_distance import calculate_road_length_simple", "calculate_road_length_simple(segment[\"the_geom\"][\"coordinates\"]) self.assertLess(abs(length_after - length_before), margin, \"The difference between the original \" \"length and", "x in range(i+1, length): 
other_road = self.road_net_segmented[x][\"the_geom\"] self.assertNotEqual(road, other_road, \"Duplicate segment geometry coordinates\")", "= MIN_COORDINATES_LENGTH network = vegnet_to_geojson(cls.kommune, cls.vegref) cls.count, cls.road_net = network[0], network[1][\"features\"] # Apparently", "segment :return: Nothing \"\"\" i = 0 for road in self.road_net: i +=", "in range(length): road = self.road_net_segmented[i][\"the_geom\"] for x in range(i+1, length): other_road = self.road_net_segmented[x][\"the_geom\"]", "road[\"the_geom\"][\"coordinates\"] road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) coordinates_segmented = [] for segment in", "has less than \" + str(self.min_coordinates_length) + \" GPS coordinates\" for segment in", "segment in self.road_net_segmented: # coordinates are split by commas, so the count of", "road_distance < self.max_segment_distance: self.assertTrue(road_segmented_length == 1, \"This road was segmented, but should not", "= convert(road) road_coords_length = len(converted_road[\"the_geom\"][\"coordinates\"]) road_distance = calculate_road_length_simple(converted_road[\"the_geom\"][\"coordinates\"]) road_segmented = segment_network([filter_road(road)], self.max_segment_distance, self.min_coordinates_length)", "vapi.constants import MAX_SEGMENT_LENGTH, MIN_COORDINATES_LENGTH from api.segmenter.calculate_distance import calculate_road_length_simple from api.segmenter.road_segmenter import segment_network, split_segment", "The geometry_to_list function should return a dictionary containing coordinates as a list, otherwise", "self.max_segment_distance: self.assertTrue(road_segmented_length == 1, \"This road was segmented, but should not have been.\")", "dictionary containing coordinates as a list, otherwise the segmenter can't split segments :return:", "with the end gps point of the previous segment :return: Nothing \"\"\" for", "apps.data.road_segmenting.road_fetcher import vegnet_to_geojson 
from apps.data.road_segmenting.road_filter import filter_road from vapi.constants import MAX_SEGMENT_LENGTH, MIN_COORDINATES_LENGTH from", "in self.road_net_segmented: self.assertIsInstance(segment, dict, error_message) def test_split_segment_geometry_len(self): \"\"\" Given a list of roads", "multiple segments with the same coordinates \"\"\" length = len(self.road_net_segmented)-1 for i in", "should start with the end gps point of the previous segment :return: Nothing", "The road_filter function should return a string, otherwise segmentation will crash in later", "= calculate_road_length_simple(converted_road[\"the_geom\"][\"coordinates\"]) road_segmented = segment_network([filter_road(road)], self.max_segment_distance, self.min_coordinates_length) road_segmented_length = len(road_segmented) if road_distance <", "road in self.road_net: road = convert(road) coordinates_original = road[\"the_geom\"][\"coordinates\"] road_segmented = split_segment(road, self.max_segment_distance,", "coordinates_segmented.extend(segment[\"the_geom\"][\"coordinates\"]) for coordinate in coordinates_original: self.assertTrue(coordinate in coordinates_segmented, \"Missing coordinate after segmenting\") def", "in self.road_net: road = convert(road) length_before = calculate_road_length_simple(road[\"the_geom\"][\"coordinates\"]) road_segmented = split_segment(road, self.max_segment_distance, [],", "5001 cls.vegref = \"kg\" cls.max_segment_distance = MAX_SEGMENT_LENGTH cls.min_coordinates_length = MIN_COORDINATES_LENGTH network = vegnet_to_geojson(cls.kommune,", "test_calculate_road_length(self): \"\"\" The total distance of the segmented road should be similar to", "+ str(self.min_coordinates_length) + \" GPS coordinates\" for segment in self.road_net_segmented: # coordinates are", "split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) coordinates_segmented = [] for segment in road_segmented: coordinates_segmented.extend(segment[\"the_geom\"][\"coordinates\"]) 
for", "each test function it is needed instead of here. road_net_list = [] for", "been.\") elif road_coords_length >= 2*self.min_coordinates_length and road_distance > self.max_segment_distance: self.assertTrue(road_segmented_length > 1, (\"This", "funky and the road_net does not stay filtered after setUpClass is run, #", "road_segmented: coordinates_segmented.extend(segment[\"the_geom\"][\"coordinates\"]) for coordinate in coordinates_original: self.assertTrue(coordinate in coordinates_segmented, \"Missing coordinate after segmenting\")", "should return a list :return: Nothing \"\"\" self.assertIsInstance(self.road_net_segmented, list, \"The road segmenter did", "cls.vegref = \"kg\" cls.max_segment_distance = MAX_SEGMENT_LENGTH cls.min_coordinates_length = MIN_COORDINATES_LENGTH network = vegnet_to_geojson(cls.kommune, cls.vegref)", "range(1, len(road_segmented)): curr_segment = road_segmented[i] prev_segment = road_segmented[i-1] end_coordinate = len(prev_segment[\"the_geom\"][\"coordinates\"])-1 self.assertEqual(curr_segment[\"the_geom\"][\"coordinates\"][0], prev_segment[\"the_geom\"][\"coordinates\"][end_coordinate],", "\"\"\" The road_segmenter function should return a list :return: Nothing \"\"\" self.assertIsInstance(self.road_net_segmented, list,", "the tests to run \"\"\" pass def test_road_segmenter_list(self): \"\"\" The road_segmenter function should", "in self.road_net: road = convert(road) coordinates_original = road[\"the_geom\"][\"coordinates\"] road_segmented = split_segment(road, self.max_segment_distance, [],", "self.assertTrue(coordinate in coordinates_segmented, \"Missing coordinate after segmenting\") def test_over_and_undersegmenting(self): \"\"\" The segmenter should", "should have been segmented, but was not. 
\" \"Stretchdistance:\", road_distance, \"Coordinates:\", converted_road[\"the_geom\"][\"coordinates\"], i))", "pass def test_road_segmenter_list(self): \"\"\" The road_segmenter function should return a list :return: Nothing", "correctly chained\") def test_split_segment_negative_length(self): \"\"\" No road segments should have a negative road", "function should return a list :return: Nothing \"\"\" self.assertIsInstance(self.road_net_segmented, list, \"The road segmenter", "length_after += calculate_road_length_simple(segment[\"the_geom\"][\"coordinates\"]) self.assertLess(abs(length_after - length_before), margin, \"The difference between the original \"", "for road in cls.road_net: road_net_list.append(filter_road(road)) cls.road_net_segmented = segment_network(road_net_list, cls.max_segment_distance, cls.min_coordinates_length) def setUp(self): \"\"\"", "in road_segmented: length_after += calculate_road_length_simple(segment[\"the_geom\"][\"coordinates\"]) self.assertLess(abs(length_after - length_before), margin, \"The difference between the", "geometry into a string\") def test_geometry_conversion(self): \"\"\" The geometry_to_list function should return a", "in coordinates_segmented, \"Missing coordinate after segmenting\") def test_over_and_undersegmenting(self): \"\"\" The segmenter should only", "start with the end gps point of the previous segment :return: Nothing \"\"\"", "\"geometry_to_list should return a \" \"dictionary\") self.assertIsInstance(road[\"the_geom\"][\"coordinates\"], list, \"geometry_to_list should return a turn", "are of type dict \\n\" for segment in self.road_net_segmented: self.assertIsInstance(segment, dict, error_message) def", "< self.max_segment_distance: self.assertTrue(road_segmented_length == 1, \"This road was segmented, but should not have", "the limit in length, it should never segment something shorter than that. 
In", "convert(road) self.assertIsInstance(road[\"the_geom\"], dict, \"geometry_to_list should return a \" \"dictionary\") self.assertIsInstance(road[\"the_geom\"][\"coordinates\"], list, \"geometry_to_list should", "elements in the split list are of type dict \\n\" for segment in", "been segmented, but was not. \" \"Stretchdistance:\", road_distance, \"Coordinates:\", converted_road[\"the_geom\"][\"coordinates\"], i)) if __name__", "coordinates_amount = segment[\"the_geom\"].count(\",\") self.assertGreaterEqual(coordinates_amount+1, self.min_coordinates_length, error_message) def test_road_filter(self): \"\"\" The road_filter function should", "road_segmented[i-1] end_coordinate = len(prev_segment[\"the_geom\"][\"coordinates\"])-1 self.assertEqual(curr_segment[\"the_geom\"][\"coordinates\"][0], prev_segment[\"the_geom\"][\"coordinates\"][end_coordinate], \"Segments are not correctly chained\") def test_split_segment_negative_length(self):", "gps point of the previous segment :return: Nothing \"\"\" for road in self.road_net:", ":return: Nothing \"\"\" i = 0 for road in self.road_net: i += 1", "cls.min_coordinates_length) def setUp(self): \"\"\" Needs to be here for the tests to run", "list, \"geometry_to_list should return a turn the \" \"coordinates into a list\") def", "The segmenter should only run on segments that are over the limit in", "split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) length_after = 0 for segment in road_segmented: length_after +=", "Needs to be here for the tests to run \"\"\" pass def test_road_segmenter_list(self):", "road_distance > self.max_segment_distance: self.assertTrue(road_segmented_length > 1, (\"This road should have been segmented, but", "something shorter than that. 
In other words the segmented road should still be", "in coordinates_original: self.assertTrue(coordinate in coordinates_segmented, \"Missing coordinate after segmenting\") def test_over_and_undersegmenting(self): \"\"\" The", "that are over the limit in length, it should never segment something shorter", "road_filter function should return a string, otherwise segmentation will crash in later stages", "road = convert(road) self.assertIsInstance(road[\"the_geom\"], dict, \"geometry_to_list should return a \" \"dictionary\") self.assertIsInstance(road[\"the_geom\"][\"coordinates\"], list,", "In other words the segmented road should still be only one segment :return:", "and the road_net does not stay filtered after setUpClass is run, # so", "a list :return: Nothing \"\"\" self.assertIsInstance(self.road_net_segmented, list, \"The road segmenter did not return", "coordinates as a list, otherwise the segmenter can't split segments :return: Nothing \"\"\"", "\" \"length and the segmented length is \" \"too large\") def test_split_segment_chaining(self): \"\"\"", "coordinate after segmenting\") def test_over_and_undersegmenting(self): \"\"\" The segmenter should only run on segments", "original \" \"length and the segmented length is \" \"too large\") def test_split_segment_chaining(self):", "= split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) length_after = 0 for segment in road_segmented: length_after", "road should be similar to the length before segmentation, within a margin given", "filter_road(road) road[\"the_geom\"] = geometry_to_list(road[\"the_geom\"]) return road class TestSegmenting(unittest.TestCase): @classmethod def setUpClass(cls): cls.kommune =", "other_road = self.road_net_segmented[x][\"the_geom\"] self.assertNotEqual(road, other_road, \"Duplicate segment geometry coordinates\") def test_missing_coordinates(self): \"\"\" All", "= segment_network([filter_road(road)], self.max_segment_distance, self.min_coordinates_length) 
road_segmented_length = len(road_segmented) if road_distance < self.max_segment_distance: self.assertTrue(road_segmented_length ==", "coordinates should still be present after segmenting road network :return: Nothing \"\"\" for", "Nothing \"\"\" for road in self.road_net: road = filter_road(road) self.assertIsInstance(road[\"the_geom\"], str, \"road_filter should", "the segmented length is \" \"too large\") def test_split_segment_chaining(self): \"\"\" Every connected segment", "= [] for road in cls.road_net: road_net_list.append(filter_road(road)) cls.road_net_segmented = segment_network(road_net_list, cls.max_segment_distance, cls.min_coordinates_length) def", "in road_segmented: coordinates_segmented.extend(segment[\"the_geom\"][\"coordinates\"]) for coordinate in coordinates_original: self.assertTrue(coordinate in coordinates_segmented, \"Missing coordinate after", "at least 1 meter\") def test_duplicate_segments(self): \"\"\" Test if there are multiple segments", "in self.road_net_segmented: # coordinates are split by commas, so the count of commas+1", "least 1 meter\") def test_duplicate_segments(self): \"\"\" Test if there are multiple segments with", "length_after = 0 for segment in road_segmented: length_after += calculate_road_length_simple(segment[\"the_geom\"][\"coordinates\"]) self.assertLess(abs(length_after - length_before),", "(\"This road should have been segmented, but was not. 
\" \"Stretchdistance:\", road_distance, \"Coordinates:\",", "a margin given by the variable \"margin\" :return: Nothing \"\"\" margin = 3", "self.max_segment_distance: self.assertTrue(road_segmented_length > 1, (\"This road should have been segmented, but was not.", "\"\"\" The total distance of the segmented road should be similar to the", ":return: Nothing \"\"\" for road in self.road_net: road = convert(road) self.assertIsInstance(road[\"the_geom\"], dict, \"geometry_to_list", "self.assertIsInstance(road[\"the_geom\"][\"coordinates\"], list, \"geometry_to_list should return a turn the \" \"coordinates into a list\")", "self.assertIsInstance(road[\"the_geom\"], str, \"road_filter should turn geometry into a string\") def test_geometry_conversion(self): \"\"\" The", "self.road_net_segmented[i][\"the_geom\"] for x in range(i+1, length): other_road = self.road_net_segmented[x][\"the_geom\"] self.assertNotEqual(road, other_road, \"Duplicate segment", "0 for road in self.road_net: i += 1 converted_road = convert(road) road_coords_length =", "split by commas, so the count of commas+1 should be the same as", "MIN_COORDINATES_LENGTH network = vegnet_to_geojson(cls.kommune, cls.vegref) cls.count, cls.road_net = network[0], network[1][\"features\"] # Apparently the", "segmented length is \" \"too large\") def test_split_segment_chaining(self): \"\"\" Every connected segment should", "split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) for i in range(1, len(road_segmented)): curr_segment = road_segmented[i] prev_segment", "segmenter did not return a list\") def test_road_segmenter_list_elements(self): \"\"\" Every element in the", "coordinates\") def test_missing_coordinates(self): \"\"\" All original coordinates should still be present after segmenting", "list are of type dict \\n\" for segment in self.road_net_segmented: self.assertIsInstance(segment, dict, error_message)", "return a \" \"dictionary\") 
self.assertIsInstance(road[\"the_geom\"][\"coordinates\"], list, \"geometry_to_list should return a turn the \"", "instead it is done in each test function it is needed instead of", "The road_segmenter function should return a list :return: Nothing \"\"\" self.assertIsInstance(self.road_net_segmented, list, \"The", "of type dict \\n\" for segment in self.road_net_segmented: self.assertIsInstance(segment, dict, error_message) def test_split_segment_geometry_len(self):", "\"Not all elements in the split list are of type dict \\n\" for", "+ \" GPS coordinates\" for segment in self.road_net_segmented: # coordinates are split by", "the original \" \"length and the segmented length is \" \"too large\") def", "present after segmenting road network :return: Nothing \"\"\" for road in self.road_net: road", "= calculate_road_length_simple(road[\"the_geom\"][\"coordinates\"]) road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) length_after = 0 for segment", "split_segment def convert(road): road = filter_road(road) road[\"the_geom\"] = geometry_to_list(road[\"the_geom\"]) return road class TestSegmenting(unittest.TestCase):", "split list are of type dict \\n\" for segment in self.road_net_segmented: self.assertIsInstance(segment, dict,", "== 1, \"This road was segmented, but should not have been.\") elif road_coords_length", "range(i+1, length): other_road = self.road_net_segmented[x][\"the_geom\"] self.assertNotEqual(road, other_road, \"Duplicate segment geometry coordinates\") def test_missing_coordinates(self):", "of coordinates coordinates_amount = segment[\"the_geom\"].count(\",\") self.assertGreaterEqual(coordinates_amount+1, self.min_coordinates_length, error_message) def test_road_filter(self): \"\"\" The road_filter", "Given a list of roads segments, the split segments should always have a", "negative road length :return: Nothing \"\"\" for segment in self.road_net_segmented: 
self.assertGreater(segment[\"stretchdistance\"], 0, \"Stretchdistance", "cls.max_segment_distance = MAX_SEGMENT_LENGTH cls.min_coordinates_length = MIN_COORDINATES_LENGTH network = vegnet_to_geojson(cls.kommune, cls.vegref) cls.count, cls.road_net =", "= filter_road(road) road[\"the_geom\"] = geometry_to_list(road[\"the_geom\"]) return road class TestSegmenting(unittest.TestCase): @classmethod def setUpClass(cls): cls.kommune", "road_segmented = segment_network([filter_road(road)], self.max_segment_distance, self.min_coordinates_length) road_segmented_length = len(road_segmented) if road_distance < self.max_segment_distance: self.assertTrue(road_segmented_length", "coordinates \"\"\" length = len(self.road_net_segmented)-1 for i in range(length): road = self.road_net_segmented[i][\"the_geom\"] for", "calculate_road_length_simple(road[\"the_geom\"][\"coordinates\"]) road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) length_after = 0 for segment in", "TestSegmenting(unittest.TestCase): @classmethod def setUpClass(cls): cls.kommune = 5001 cls.vegref = \"kg\" cls.max_segment_distance = MAX_SEGMENT_LENGTH", "def setUpClass(cls): cls.kommune = 5001 cls.vegref = \"kg\" cls.max_segment_distance = MAX_SEGMENT_LENGTH cls.min_coordinates_length =", "apps.api.segmenter.road_segmenter import geometry_to_list from apps.data.road_segmenting.road_fetcher import vegnet_to_geojson from apps.data.road_segmenting.road_filter import filter_road from vapi.constants", "turn the \" \"coordinates into a list\") def test_calculate_road_length(self): \"\"\" The total distance", "in range(1, len(road_segmented)): curr_segment = road_segmented[i] prev_segment = road_segmented[i-1] end_coordinate = len(prev_segment[\"the_geom\"][\"coordinates\"])-1 self.assertEqual(curr_segment[\"the_geom\"][\"coordinates\"][0],", "with the same coordinates \"\"\" length = len(self.road_net_segmented)-1 for i in range(length): road", "the same as the count of 
coordinates coordinates_amount = segment[\"the_geom\"].count(\",\") self.assertGreaterEqual(coordinates_amount+1, self.min_coordinates_length, error_message)", "= road[\"the_geom\"][\"coordinates\"] road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) coordinates_segmented = [] for segment", "segment in road_segmented: length_after += calculate_road_length_simple(segment[\"the_geom\"][\"coordinates\"]) self.assertLess(abs(length_after - length_before), margin, \"The difference between", "not correctly chained\") def test_split_segment_negative_length(self): \"\"\" No road segments should have a negative", "commas+1 should be the same as the count of coordinates coordinates_amount = segment[\"the_geom\"].count(\",\")", "self.max_segment_distance, self.min_coordinates_length) road_segmented_length = len(road_segmented) if road_distance < self.max_segment_distance: self.assertTrue(road_segmented_length == 1, \"This", "= len(converted_road[\"the_geom\"][\"coordinates\"]) road_distance = calculate_road_length_simple(converted_road[\"the_geom\"][\"coordinates\"]) road_segmented = segment_network([filter_road(road)], self.max_segment_distance, self.min_coordinates_length) road_segmented_length = len(road_segmented)", "return a string, otherwise segmentation will crash in later stages :return: Nothing \"\"\"", "road = convert(road) length_before = calculate_road_length_simple(road[\"the_geom\"][\"coordinates\"]) road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) length_after", "over the limit in length, it should never segment something shorter than that.", "element in the split segments should be a dict :return: Nothing \"\"\" error_message", "def test_geometry_conversion(self): \"\"\" The geometry_to_list function should return a dictionary containing coordinates as", "distance of the segmented road should be similar to the length before segmentation,", "length :return: Nothing 
\"\"\" for segment in self.road_net_segmented: self.assertGreater(segment[\"stretchdistance\"], 0, \"Stretchdistance must be", "All original coordinates should still be present after segmenting road network :return: Nothing", "= len(prev_segment[\"the_geom\"][\"coordinates\"])-1 self.assertEqual(curr_segment[\"the_geom\"][\"coordinates\"][0], prev_segment[\"the_geom\"][\"coordinates\"][end_coordinate], \"Segments are not correctly chained\") def test_split_segment_negative_length(self): \"\"\" No", "it should never segment something shorter than that. In other words the segmented", "= 5001 cls.vegref = \"kg\" cls.max_segment_distance = MAX_SEGMENT_LENGTH cls.min_coordinates_length = MIN_COORDINATES_LENGTH network =", "Nothing \"\"\" for road in self.road_net: road = convert(road) self.assertIsInstance(road[\"the_geom\"], dict, \"geometry_to_list should", "road in self.road_net: road = filter_road(road) self.assertIsInstance(road[\"the_geom\"], str, \"road_filter should turn geometry into", "length = len(self.road_net_segmented)-1 for i in range(length): road = self.road_net_segmented[i][\"the_geom\"] for x in", "for segment in road_segmented: coordinates_segmented.extend(segment[\"the_geom\"][\"coordinates\"]) for coordinate in coordinates_original: self.assertTrue(coordinate in coordinates_segmented, \"Missing", "\\n\" for segment in self.road_net_segmented: self.assertIsInstance(segment, dict, error_message) def test_split_segment_geometry_len(self): \"\"\" Given a", "MIN_COORDINATES_LENGTH from api.segmenter.calculate_distance import calculate_road_length_simple from api.segmenter.road_segmenter import segment_network, split_segment def convert(road): road", "for road in self.road_net: road = filter_road(road) self.assertIsInstance(road[\"the_geom\"], str, \"road_filter should turn geometry", "2 or more :return: Nothing \"\"\" error_message = \"Segment has less than \"", "chained\") def test_split_segment_negative_length(self): \"\"\" No road segments should 
have a negative road length", "= \"Not all elements in the split list are of type dict \\n\"", "should be the same as the count of coordinates coordinates_amount = segment[\"the_geom\"].count(\",\") self.assertGreaterEqual(coordinates_amount+1,", "can't split segments :return: Nothing \"\"\" for road in self.road_net: road = convert(road)", "are over the limit in length, it should never segment something shorter than", "i = 0 for road in self.road_net: i += 1 converted_road = convert(road)", "1, (\"This road should have been segmented, but was not. \" \"Stretchdistance:\", road_distance,", "range(length): road = self.road_net_segmented[i][\"the_geom\"] for x in range(i+1, length): other_road = self.road_net_segmented[x][\"the_geom\"] self.assertNotEqual(road,", "original coordinates should still be present after segmenting road network :return: Nothing \"\"\"", "= convert(road) coordinates_original = road[\"the_geom\"][\"coordinates\"] road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) coordinates_segmented =", "should be a dict :return: Nothing \"\"\" error_message = \"Not all elements in", "otherwise the segmenter can't split segments :return: Nothing \"\"\" for road in self.road_net:", "from api.segmenter.calculate_distance import calculate_road_length_simple from api.segmenter.road_segmenter import segment_network, split_segment def convert(road): road =", "self.road_net: road = convert(road) road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) for i in", "a list, otherwise the segmenter can't split segments :return: Nothing \"\"\" for road", "but should not have been.\") elif road_coords_length >= 2*self.min_coordinates_length and road_distance > self.max_segment_distance:", "between the original \" \"length and the segmented length is \" \"too large\")", "only run on segments that are over the limit in length, it should", "i += 1 converted_road = convert(road) 
road_coords_length = len(converted_road[\"the_geom\"][\"coordinates\"]) road_distance = calculate_road_length_simple(converted_road[\"the_geom\"][\"coordinates\"]) road_segmented", "the segmented road should be similar to the length before segmentation, within a", "the split list are of type dict \\n\" for segment in self.road_net_segmented: self.assertIsInstance(segment,", "test_split_segment_negative_length(self): \"\"\" No road segments should have a negative road length :return: Nothing", "less than \" + str(self.min_coordinates_length) + \" GPS coordinates\" for segment in self.road_net_segmented:", "of the previous segment :return: Nothing \"\"\" for road in self.road_net: road =", "segmented road should be similar to the length before segmentation, within a margin", "self.road_net: road = convert(road) length_before = calculate_road_length_simple(road[\"the_geom\"][\"coordinates\"]) road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length)", "def test_road_filter(self): \"\"\" The road_filter function should return a string, otherwise segmentation will", "road in self.road_net: i += 1 converted_road = convert(road) road_coords_length = len(converted_road[\"the_geom\"][\"coordinates\"]) road_distance", "margin, \"The difference between the original \" \"length and the segmented length is", "length before segmentation, within a margin given by the variable \"margin\" :return: Nothing", "\"Duplicate segment geometry coordinates\") def test_missing_coordinates(self): \"\"\" All original coordinates should still be", "for i in range(1, len(road_segmented)): curr_segment = road_segmented[i] prev_segment = road_segmented[i-1] end_coordinate =", "setUpClass is run, # so instead it is done in each test function", "self.min_coordinates_length, error_message) def test_road_filter(self): \"\"\" The road_filter function should return a string, otherwise", "one segment :return: Nothing \"\"\" i = 0 for road in self.road_net: i", 
"geometry coordinates\") def test_missing_coordinates(self): \"\"\" All original coordinates should still be present after", "Every element in the split segments should be a dict :return: Nothing \"\"\"", "self.assertIsInstance(self.road_net_segmented, list, \"The road segmenter did not return a list\") def test_road_segmenter_list_elements(self): \"\"\"", "def test_split_segment_negative_length(self): \"\"\" No road segments should have a negative road length :return:", "\" \"dictionary\") self.assertIsInstance(road[\"the_geom\"][\"coordinates\"], list, \"geometry_to_list should return a turn the \" \"coordinates into", "segment geometry coordinates\") def test_missing_coordinates(self): \"\"\" All original coordinates should still be present", "before segmentation, within a margin given by the variable \"margin\" :return: Nothing \"\"\"", "by commas, so the count of commas+1 should be the same as the", "\"too large\") def test_split_segment_chaining(self): \"\"\" Every connected segment should start with the end", "if there are multiple segments with the same coordinates \"\"\" length = len(self.road_net_segmented)-1", "segmented road should still be only one segment :return: Nothing \"\"\" i =", "more :return: Nothing \"\"\" error_message = \"Segment has less than \" + str(self.min_coordinates_length)", "network[1][\"features\"] # Apparently the setUpClass is a bit funky and the road_net does", "self.assertLess(abs(length_after - length_before), margin, \"The difference between the original \" \"length and the", "return a list :return: Nothing \"\"\" self.assertIsInstance(self.road_net_segmented, list, \"The road segmenter did not", "segments, the split segments should always have a length of 2 or more", "segment :return: Nothing \"\"\" for road in self.road_net: road = convert(road) road_segmented =", "= 0 for road in self.road_net: i += 1 converted_road = convert(road) road_coords_length", "road = filter_road(road) road[\"the_geom\"] = 
geometry_to_list(road[\"the_geom\"]) return road class TestSegmenting(unittest.TestCase): @classmethod def setUpClass(cls):", "No road segments should have a negative road length :return: Nothing \"\"\" for", "\"\"\" Every element in the split segments should be a dict :return: Nothing", "Nothing \"\"\" error_message = \"Segment has less than \" + str(self.min_coordinates_length) + \"", "the count of coordinates coordinates_amount = segment[\"the_geom\"].count(\",\") self.assertGreaterEqual(coordinates_amount+1, self.min_coordinates_length, error_message) def test_road_filter(self): \"\"\"", "road segments should have a negative road length :return: Nothing \"\"\" for segment", "[], self.min_coordinates_length) coordinates_segmented = [] for segment in road_segmented: coordinates_segmented.extend(segment[\"the_geom\"][\"coordinates\"]) for coordinate in", "as a list, otherwise the segmenter can't split segments :return: Nothing \"\"\" for", "return a list\") def test_road_segmenter_list_elements(self): \"\"\" Every element in the split segments should", "should turn geometry into a string\") def test_geometry_conversion(self): \"\"\" The geometry_to_list function should", "setUpClass is a bit funky and the road_net does not stay filtered after", "def test_split_segment_chaining(self): \"\"\" Every connected segment should start with the end gps point", "self.min_coordinates_length) road_segmented_length = len(road_segmented) if road_distance < self.max_segment_distance: self.assertTrue(road_segmented_length == 1, \"This road", "after segmenting road network :return: Nothing \"\"\" for road in self.road_net: road =", "list of roads segments, the split segments should always have a length of", "than that. In other words the segmented road should still be only one", "needed instead of here. 
road_net_list = [] for road in cls.road_net: road_net_list.append(filter_road(road)) cls.road_net_segmented", "the \" \"coordinates into a list\") def test_calculate_road_length(self): \"\"\" The total distance of", "not have been.\") elif road_coords_length >= 2*self.min_coordinates_length and road_distance > self.max_segment_distance: self.assertTrue(road_segmented_length >", "curr_segment = road_segmented[i] prev_segment = road_segmented[i-1] end_coordinate = len(prev_segment[\"the_geom\"][\"coordinates\"])-1 self.assertEqual(curr_segment[\"the_geom\"][\"coordinates\"][0], prev_segment[\"the_geom\"][\"coordinates\"][end_coordinate], \"Segments are", "filter_road from vapi.constants import MAX_SEGMENT_LENGTH, MIN_COORDINATES_LENGTH from api.segmenter.calculate_distance import calculate_road_length_simple from api.segmenter.road_segmenter import", "road should have been segmented, but was not. \" \"Stretchdistance:\", road_distance, \"Coordinates:\", converted_road[\"the_geom\"][\"coordinates\"],", "\"The road segmenter did not return a list\") def test_road_segmenter_list_elements(self): \"\"\" Every element", "calculate_road_length_simple(converted_road[\"the_geom\"][\"coordinates\"]) road_segmented = segment_network([filter_road(road)], self.max_segment_distance, self.min_coordinates_length) road_segmented_length = len(road_segmented) if road_distance < self.max_segment_distance:", "is run, # so instead it is done in each test function it", "for i in range(length): road = self.road_net_segmented[i][\"the_geom\"] for x in range(i+1, length): other_road", "geometry_to_list from apps.data.road_segmenting.road_fetcher import vegnet_to_geojson from apps.data.road_segmenting.road_filter import filter_road from vapi.constants import MAX_SEGMENT_LENGTH,", "end_coordinate = len(prev_segment[\"the_geom\"][\"coordinates\"])-1 self.assertEqual(curr_segment[\"the_geom\"][\"coordinates\"][0], prev_segment[\"the_geom\"][\"coordinates\"][end_coordinate], \"Segments are not 
correctly chained\") def test_split_segment_negative_length(self): \"\"\"", "\"This road was segmented, but should not have been.\") elif road_coords_length >= 2*self.min_coordinates_length", "for segment in self.road_net_segmented: self.assertGreater(segment[\"stretchdistance\"], 0, \"Stretchdistance must be of at least 1", "road = convert(road) road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) for i in range(1,", "for segment in self.road_net_segmented: self.assertIsInstance(segment, dict, error_message) def test_split_segment_geometry_len(self): \"\"\" Given a list", "test_split_segment_chaining(self): \"\"\" Every connected segment should start with the end gps point of", "segment in road_segmented: coordinates_segmented.extend(segment[\"the_geom\"][\"coordinates\"]) for coordinate in coordinates_original: self.assertTrue(coordinate in coordinates_segmented, \"Missing coordinate", "self.min_coordinates_length) for i in range(1, len(road_segmented)): curr_segment = road_segmented[i] prev_segment = road_segmented[i-1] end_coordinate", "should always have a length of 2 or more :return: Nothing \"\"\" error_message", "= segment_network(road_net_list, cls.max_segment_distance, cls.min_coordinates_length) def setUp(self): \"\"\" Needs to be here for the", "coordinates\" for segment in self.road_net_segmented: # coordinates are split by commas, so the", ":return: Nothing \"\"\" error_message = \"Not all elements in the split list are", "\" \"coordinates into a list\") def test_calculate_road_length(self): \"\"\" The total distance of the", "from api.segmenter.road_segmenter import segment_network, split_segment def convert(road): road = filter_road(road) road[\"the_geom\"] = geometry_to_list(road[\"the_geom\"])", "large\") def test_split_segment_chaining(self): \"\"\" Every connected segment should start with the end gps", "road segmenter did not return a list\") def test_road_segmenter_list_elements(self): \"\"\" Every 
element in", "in each test function it is needed instead of here. road_net_list = []", "split segments should be a dict :return: Nothing \"\"\" error_message = \"Not all", "1, \"This road was segmented, but should not have been.\") elif road_coords_length >=", "commas, so the count of commas+1 should be the same as the count", "self.assertGreater(segment[\"stretchdistance\"], 0, \"Stretchdistance must be of at least 1 meter\") def test_duplicate_segments(self): \"\"\"", "for road in self.road_net: road = convert(road) coordinates_original = road[\"the_geom\"][\"coordinates\"] road_segmented = split_segment(road,", "dict \\n\" for segment in self.road_net_segmented: self.assertIsInstance(segment, dict, error_message) def test_split_segment_geometry_len(self): \"\"\" Given", "coordinates_original: self.assertTrue(coordinate in coordinates_segmented, \"Missing coordinate after segmenting\") def test_over_and_undersegmenting(self): \"\"\" The segmenter", "= len(road_segmented) if road_distance < self.max_segment_distance: self.assertTrue(road_segmented_length == 1, \"This road was segmented,", "the count of commas+1 should be the same as the count of coordinates", "the same coordinates \"\"\" length = len(self.road_net_segmented)-1 for i in range(length): road =", "import segment_network, split_segment def convert(road): road = filter_road(road) road[\"the_geom\"] = geometry_to_list(road[\"the_geom\"]) return road", "\"\"\" for road in self.road_net: road = filter_road(road) self.assertIsInstance(road[\"the_geom\"], str, \"road_filter should turn", "test_geometry_conversion(self): \"\"\" The geometry_to_list function should return a dictionary containing coordinates as a", "geometry_to_list function should return a dictionary containing coordinates as a list, otherwise the", "roads segments, the split segments should always have a length of 2 or", "# coordinates are split by commas, so the count of commas+1 should be", "self.assertNotEqual(road, other_road, \"Duplicate 
segment geometry coordinates\") def test_missing_coordinates(self): \"\"\" All original coordinates should", "convert(road) length_before = calculate_road_length_simple(road[\"the_geom\"][\"coordinates\"]) road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) length_after = 0", "length): other_road = self.road_net_segmented[x][\"the_geom\"] self.assertNotEqual(road, other_road, \"Duplicate segment geometry coordinates\") def test_missing_coordinates(self): \"\"\"", "all elements in the split list are of type dict \\n\" for segment", "segment in self.road_net_segmented: self.assertIsInstance(segment, dict, error_message) def test_split_segment_geometry_len(self): \"\"\" Given a list of", "[], self.min_coordinates_length) for i in range(1, len(road_segmented)): curr_segment = road_segmented[i] prev_segment = road_segmented[i-1]", "the previous segment :return: Nothing \"\"\" for road in self.road_net: road = convert(road)", "coordinates coordinates_amount = segment[\"the_geom\"].count(\",\") self.assertGreaterEqual(coordinates_amount+1, self.min_coordinates_length, error_message) def test_road_filter(self): \"\"\" The road_filter function", "setUpClass(cls): cls.kommune = 5001 cls.vegref = \"kg\" cls.max_segment_distance = MAX_SEGMENT_LENGTH cls.min_coordinates_length = MIN_COORDINATES_LENGTH", "test_road_filter(self): \"\"\" The road_filter function should return a string, otherwise segmentation will crash", "string, otherwise segmentation will crash in later stages :return: Nothing \"\"\" for road", "cls.road_net = network[0], network[1][\"features\"] # Apparently the setUpClass is a bit funky and", "test_split_segment_geometry_len(self): \"\"\" Given a list of roads segments, the split segments should always", "margin given by the variable \"margin\" :return: Nothing \"\"\" margin = 3 for", "road should still be only one segment :return: Nothing \"\"\" i = 0", "are split by commas, so the count of commas+1 should be the 
same", "self.assertTrue(road_segmented_length > 1, (\"This road should have been segmented, but was not. \"", "length_before = calculate_road_length_simple(road[\"the_geom\"][\"coordinates\"]) road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) length_after = 0 for", "the split segments should be a dict :return: Nothing \"\"\" error_message = \"Not", "should still be only one segment :return: Nothing \"\"\" i = 0 for", "should not have been.\") elif road_coords_length >= 2*self.min_coordinates_length and road_distance > self.max_segment_distance: self.assertTrue(road_segmented_length", "cls.vegref) cls.count, cls.road_net = network[0], network[1][\"features\"] # Apparently the setUpClass is a bit", "api.segmenter.road_segmenter import segment_network, split_segment def convert(road): road = filter_road(road) road[\"the_geom\"] = geometry_to_list(road[\"the_geom\"]) return", "should return a \" \"dictionary\") self.assertIsInstance(road[\"the_geom\"][\"coordinates\"], list, \"geometry_to_list should return a turn the", "a list of roads segments, the split segments should always have a length", "a turn the \" \"coordinates into a list\") def test_calculate_road_length(self): \"\"\" The total", "later stages :return: Nothing \"\"\" for road in self.road_net: road = filter_road(road) self.assertIsInstance(road[\"the_geom\"],", "it is done in each test function it is needed instead of here.", "\" GPS coordinates\" for segment in self.road_net_segmented: # coordinates are split by commas,", "from vapi.constants import MAX_SEGMENT_LENGTH, MIN_COORDINATES_LENGTH from api.segmenter.calculate_distance import calculate_road_length_simple from api.segmenter.road_segmenter import segment_network,", "Nothing \"\"\" for road in self.road_net: road = convert(road) coordinates_original = road[\"the_geom\"][\"coordinates\"] road_segmented", "tests to run \"\"\" pass def test_road_segmenter_list(self): \"\"\" The road_segmenter function 
should return", "\"\"\" for road in self.road_net: road = convert(road) coordinates_original = road[\"the_geom\"][\"coordinates\"] road_segmented =", "for segment in road_segmented: length_after += calculate_road_length_simple(segment[\"the_geom\"][\"coordinates\"]) self.assertLess(abs(length_after - length_before), margin, \"The difference", "not stay filtered after setUpClass is run, # so instead it is done", "self.road_net_segmented: # coordinates are split by commas, so the count of commas+1 should", "list, otherwise the segmenter can't split segments :return: Nothing \"\"\" for road in", "segment something shorter than that. In other words the segmented road should still", "road = self.road_net_segmented[i][\"the_geom\"] for x in range(i+1, length): other_road = self.road_net_segmented[x][\"the_geom\"] self.assertNotEqual(road, other_road,", "error_message = \"Segment has less than \" + str(self.min_coordinates_length) + \" GPS coordinates\"", "are multiple segments with the same coordinates \"\"\" length = len(self.road_net_segmented)-1 for i", "be only one segment :return: Nothing \"\"\" i = 0 for road in", "i in range(length): road = self.road_net_segmented[i][\"the_geom\"] for x in range(i+1, length): other_road =", "never segment something shorter than that. In other words the segmented road should", ":return: Nothing \"\"\" for road in self.road_net: road = convert(road) road_segmented = split_segment(road,", "\"\"\" for road in self.road_net: road = convert(road) road_segmented = split_segment(road, self.max_segment_distance, [],", "road_segmented: length_after += calculate_road_length_simple(segment[\"the_geom\"][\"coordinates\"]) self.assertLess(abs(length_after - length_before), margin, \"The difference between the original", "was not. 
\" \"Stretchdistance:\", road_distance, \"Coordinates:\", converted_road[\"the_geom\"][\"coordinates\"], i)) if __name__ == \"__main__\": unittest.main()", "into a string\") def test_geometry_conversion(self): \"\"\" The geometry_to_list function should return a dictionary", ":return: Nothing \"\"\" for road in self.road_net: road = convert(road) coordinates_original = road[\"the_geom\"][\"coordinates\"]", "for road in self.road_net: road = convert(road) self.assertIsInstance(road[\"the_geom\"], dict, \"geometry_to_list should return a", "are not correctly chained\") def test_split_segment_negative_length(self): \"\"\" No road segments should have a", "self.assertIsInstance(road[\"the_geom\"], dict, \"geometry_to_list should return a \" \"dictionary\") self.assertIsInstance(road[\"the_geom\"][\"coordinates\"], list, \"geometry_to_list should return", "count of commas+1 should be the same as the count of coordinates coordinates_amount", "have been.\") elif road_coords_length >= 2*self.min_coordinates_length and road_distance > self.max_segment_distance: self.assertTrue(road_segmented_length > 1,", "\"\"\" Given a list of roads segments, the split segments should always have", "should return a dictionary containing coordinates as a list, otherwise the segmenter can't", "network[0], network[1][\"features\"] # Apparently the setUpClass is a bit funky and the road_net", "len(prev_segment[\"the_geom\"][\"coordinates\"])-1 self.assertEqual(curr_segment[\"the_geom\"][\"coordinates\"][0], prev_segment[\"the_geom\"][\"coordinates\"][end_coordinate], \"Segments are not correctly chained\") def test_split_segment_negative_length(self): \"\"\" No road", "def test_missing_coordinates(self): \"\"\" All original coordinates should still be present after segmenting road", "for the tests to run \"\"\" pass def test_road_segmenter_list(self): \"\"\" The road_segmenter function", "road_distance = calculate_road_length_simple(converted_road[\"the_geom\"][\"coordinates\"]) 
road_segmented = segment_network([filter_road(road)], self.max_segment_distance, self.min_coordinates_length) road_segmented_length = len(road_segmented) if road_distance", "have a length of 2 or more :return: Nothing \"\"\" error_message = \"Segment", "def test_split_segment_geometry_len(self): \"\"\" Given a list of roads segments, the split segments should", "in length, it should never segment something shorter than that. In other words", "stay filtered after setUpClass is run, # so instead it is done in", "list :return: Nothing \"\"\" self.assertIsInstance(self.road_net_segmented, list, \"The road segmenter did not return a", "length_before), margin, \"The difference between the original \" \"length and the segmented length", "self.assertIsInstance(segment, dict, error_message) def test_split_segment_geometry_len(self): \"\"\" Given a list of roads segments, the", "containing coordinates as a list, otherwise the segmenter can't split segments :return: Nothing", "did not return a list\") def test_road_segmenter_list_elements(self): \"\"\" Every element in the split", "there are multiple segments with the same coordinates \"\"\" length = len(self.road_net_segmented)-1 for", "\"\"\" error_message = \"Not all elements in the split list are of type", "for road in self.road_net: road = convert(road) length_before = calculate_road_length_simple(road[\"the_geom\"][\"coordinates\"]) road_segmented = split_segment(road,", "was segmented, but should not have been.\") elif road_coords_length >= 2*self.min_coordinates_length and road_distance", "but was not. 
\" \"Stretchdistance:\", road_distance, \"Coordinates:\", converted_road[\"the_geom\"][\"coordinates\"], i)) if __name__ == \"__main__\":", "2*self.min_coordinates_length and road_distance > self.max_segment_distance: self.assertTrue(road_segmented_length > 1, (\"This road should have been", "self.max_segment_distance, [], self.min_coordinates_length) length_after = 0 for segment in road_segmented: length_after += calculate_road_length_simple(segment[\"the_geom\"][\"coordinates\"])", "+= 1 converted_road = convert(road) road_coords_length = len(converted_road[\"the_geom\"][\"coordinates\"]) road_distance = calculate_road_length_simple(converted_road[\"the_geom\"][\"coordinates\"]) road_segmented =", "import filter_road from vapi.constants import MAX_SEGMENT_LENGTH, MIN_COORDINATES_LENGTH from api.segmenter.calculate_distance import calculate_road_length_simple from api.segmenter.road_segmenter", "> self.max_segment_distance: self.assertTrue(road_segmented_length > 1, (\"This road should have been segmented, but was", "turn geometry into a string\") def test_geometry_conversion(self): \"\"\" The geometry_to_list function should return", "convert(road) road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) for i in range(1, len(road_segmented)): curr_segment", "a negative road length :return: Nothing \"\"\" for segment in self.road_net_segmented: self.assertGreater(segment[\"stretchdistance\"], 0,", "Nothing \"\"\" error_message = \"Not all elements in the split list are of", "self.min_coordinates_length) coordinates_segmented = [] for segment in road_segmented: coordinates_segmented.extend(segment[\"the_geom\"][\"coordinates\"]) for coordinate in coordinates_original:", "still be only one segment :return: Nothing \"\"\" i = 0 for road", "\"geometry_to_list should return a turn the \" \"coordinates into a list\") def test_calculate_road_length(self):", "road_segmenter function should return a list :return: Nothing \"\"\" 
self.assertIsInstance(self.road_net_segmented, list, \"The road", "self.road_net: i += 1 converted_road = convert(road) road_coords_length = len(converted_road[\"the_geom\"][\"coordinates\"]) road_distance = calculate_road_length_simple(converted_road[\"the_geom\"][\"coordinates\"])", "self.assertGreaterEqual(coordinates_amount+1, self.min_coordinates_length, error_message) def test_road_filter(self): \"\"\" The road_filter function should return a string,", "coordinates_original = road[\"the_geom\"][\"coordinates\"] road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) coordinates_segmented = [] for", "road in cls.road_net: road_net_list.append(filter_road(road)) cls.road_net_segmented = segment_network(road_net_list, cls.max_segment_distance, cls.min_coordinates_length) def setUp(self): \"\"\" Needs", "segment should start with the end gps point of the previous segment :return:", "the end gps point of the previous segment :return: Nothing \"\"\" for road", "so the count of commas+1 should be the same as the count of", "import MAX_SEGMENT_LENGTH, MIN_COORDINATES_LENGTH from api.segmenter.calculate_distance import calculate_road_length_simple from api.segmenter.road_segmenter import segment_network, split_segment def", "segments should have a negative road length :return: Nothing \"\"\" for segment in", "cls.min_coordinates_length = MIN_COORDINATES_LENGTH network = vegnet_to_geojson(cls.kommune, cls.vegref) cls.count, cls.road_net = network[0], network[1][\"features\"] #", "if road_distance < self.max_segment_distance: self.assertTrue(road_segmented_length == 1, \"This road was segmented, but should", "a \" \"dictionary\") self.assertIsInstance(road[\"the_geom\"][\"coordinates\"], list, \"geometry_to_list should return a turn the \" \"coordinates", "is a bit funky and the road_net does not stay filtered after setUpClass", "cls.kommune = 5001 cls.vegref = \"kg\" cls.max_segment_distance = MAX_SEGMENT_LENGTH 
cls.min_coordinates_length = MIN_COORDINATES_LENGTH network", "road_net_list.append(filter_road(road)) cls.road_net_segmented = segment_network(road_net_list, cls.max_segment_distance, cls.min_coordinates_length) def setUp(self): \"\"\" Needs to be here", "\"\"\" for segment in self.road_net_segmented: self.assertGreater(segment[\"stretchdistance\"], 0, \"Stretchdistance must be of at least", "should return a turn the \" \"coordinates into a list\") def test_calculate_road_length(self): \"\"\"", "for segment in self.road_net_segmented: # coordinates are split by commas, so the count", "function should return a dictionary containing coordinates as a list, otherwise the segmenter", "of 2 or more :return: Nothing \"\"\" error_message = \"Segment has less than", "\"\"\" margin = 3 for road in self.road_net: road = convert(road) length_before =", "= [] for segment in road_segmented: coordinates_segmented.extend(segment[\"the_geom\"][\"coordinates\"]) for coordinate in coordinates_original: self.assertTrue(coordinate in", "1 meter\") def test_duplicate_segments(self): \"\"\" Test if there are multiple segments with the", "point of the previous segment :return: Nothing \"\"\" for road in self.road_net: road", "test_road_segmenter_list_elements(self): \"\"\" Every element in the split segments should be a dict :return:", "meter\") def test_duplicate_segments(self): \"\"\" Test if there are multiple segments with the same", "setUp(self): \"\"\" Needs to be here for the tests to run \"\"\" pass", "road = filter_road(road) self.assertIsInstance(road[\"the_geom\"], str, \"road_filter should turn geometry into a string\") def", "calculate_road_length_simple from api.segmenter.road_segmenter import segment_network, split_segment def convert(road): road = filter_road(road) road[\"the_geom\"] =", "after setUpClass is run, # so instead it is done in each test", "road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) length_after = 0 for 
segment in road_segmented:", "coordinates are split by commas, so the count of commas+1 should be the", "cls.max_segment_distance, cls.min_coordinates_length) def setUp(self): \"\"\" Needs to be here for the tests to", "instead of here. road_net_list = [] for road in cls.road_net: road_net_list.append(filter_road(road)) cls.road_net_segmented =", "will crash in later stages :return: Nothing \"\"\" for road in self.road_net: road", "return a turn the \" \"coordinates into a list\") def test_calculate_road_length(self): \"\"\" The", "Test if there are multiple segments with the same coordinates \"\"\" length =", "GPS coordinates\" for segment in self.road_net_segmented: # coordinates are split by commas, so", "a list\") def test_road_segmenter_list_elements(self): \"\"\" Every element in the split segments should be", "a bit funky and the road_net does not stay filtered after setUpClass is", "segmented, but was not. \" \"Stretchdistance:\", road_distance, \"Coordinates:\", converted_road[\"the_geom\"][\"coordinates\"], i)) if __name__ ==", "the length before segmentation, within a margin given by the variable \"margin\" :return:", "road in self.road_net: road = convert(road) road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) for", "\"margin\" :return: Nothing \"\"\" margin = 3 for road in self.road_net: road =", "a length of 2 or more :return: Nothing \"\"\" error_message = \"Segment has", "road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) for i in range(1, len(road_segmented)): curr_segment =", "to the length before segmentation, within a margin given by the variable \"margin\"", "len(self.road_net_segmented)-1 for i in range(length): road = self.road_net_segmented[i][\"the_geom\"] for x in range(i+1, length):", "\"length and the segmented length is \" \"too large\") def test_split_segment_chaining(self): \"\"\" Every", "len(converted_road[\"the_geom\"][\"coordinates\"]) 
road_distance = calculate_road_length_simple(converted_road[\"the_geom\"][\"coordinates\"]) road_segmented = segment_network([filter_road(road)], self.max_segment_distance, self.min_coordinates_length) road_segmented_length = len(road_segmented) if", ":return: Nothing \"\"\" error_message = \"Segment has less than \" + str(self.min_coordinates_length) +", "to run \"\"\" pass def test_road_segmenter_list(self): \"\"\" The road_segmenter function should return a", "road_net does not stay filtered after setUpClass is run, # so instead it", "road in self.road_net: road = convert(road) length_before = calculate_road_length_simple(road[\"the_geom\"][\"coordinates\"]) road_segmented = split_segment(road, self.max_segment_distance,", "should have a negative road length :return: Nothing \"\"\" for segment in self.road_net_segmented:", "previous segment :return: Nothing \"\"\" for road in self.road_net: road = convert(road) road_segmented", "self.max_segment_distance, [], self.min_coordinates_length) coordinates_segmented = [] for segment in road_segmented: coordinates_segmented.extend(segment[\"the_geom\"][\"coordinates\"]) for coordinate", "road[\"the_geom\"] = geometry_to_list(road[\"the_geom\"]) return road class TestSegmenting(unittest.TestCase): @classmethod def setUpClass(cls): cls.kommune = 5001", "than \" + str(self.min_coordinates_length) + \" GPS coordinates\" for segment in self.road_net_segmented: #", "function it is needed instead of here. 
road_net_list = [] for road in", "str(self.min_coordinates_length) + \" GPS coordinates\" for segment in self.road_net_segmented: # coordinates are split", "segmentation will crash in later stages :return: Nothing \"\"\" for road in self.road_net:", "in self.road_net_segmented: self.assertGreater(segment[\"stretchdistance\"], 0, \"Stretchdistance must be of at least 1 meter\") def", "self.min_coordinates_length) length_after = 0 for segment in road_segmented: length_after += calculate_road_length_simple(segment[\"the_geom\"][\"coordinates\"]) self.assertLess(abs(length_after -", "similar to the length before segmentation, within a margin given by the variable", "test_missing_coordinates(self): \"\"\" All original coordinates should still be present after segmenting road network", "\"\"\" The segmenter should only run on segments that are over the limit", "\"Missing coordinate after segmenting\") def test_over_and_undersegmenting(self): \"\"\" The segmenter should only run on", "must be of at least 1 meter\") def test_duplicate_segments(self): \"\"\" Test if there", ":return: Nothing \"\"\" for road in self.road_net: road = filter_road(road) self.assertIsInstance(road[\"the_geom\"], str, \"road_filter", "cls.road_net: road_net_list.append(filter_road(road)) cls.road_net_segmented = segment_network(road_net_list, cls.max_segment_distance, cls.min_coordinates_length) def setUp(self): \"\"\" Needs to be", "the segmented road should still be only one segment :return: Nothing \"\"\" i", "a dict :return: Nothing \"\"\" error_message = \"Not all elements in the split", "words the segmented road should still be only one segment :return: Nothing \"\"\"", "Nothing \"\"\" for road in self.road_net: road = convert(road) road_segmented = split_segment(road, self.max_segment_distance,", "self.max_segment_distance, [], self.min_coordinates_length) for i in range(1, len(road_segmented)): curr_segment = road_segmented[i] prev_segment =", "the road_net does not stay filtered after 
setUpClass is run, # so instead", "\"The difference between the original \" \"length and the segmented length is \"", "segments that are over the limit in length, it should never segment something", "0, \"Stretchdistance must be of at least 1 meter\") def test_duplicate_segments(self): \"\"\" Test", "\"\"\" i = 0 for road in self.road_net: i += 1 converted_road =", "road_coords_length = len(converted_road[\"the_geom\"][\"coordinates\"]) road_distance = calculate_road_length_simple(converted_road[\"the_geom\"][\"coordinates\"]) road_segmented = segment_network([filter_road(road)], self.max_segment_distance, self.min_coordinates_length) road_segmented_length =", "have been segmented, but was not. \" \"Stretchdistance:\", road_distance, \"Coordinates:\", converted_road[\"the_geom\"][\"coordinates\"], i)) if", "should never segment something shorter than that. In other words the segmented road", "\"\"\" length = len(self.road_net_segmented)-1 for i in range(length): road = self.road_net_segmented[i][\"the_geom\"] for x", "vegnet_to_geojson(cls.kommune, cls.vegref) cls.count, cls.road_net = network[0], network[1][\"features\"] # Apparently the setUpClass is a", "or more :return: Nothing \"\"\" error_message = \"Segment has less than \" +", "unittest from apps.api.segmenter.road_segmenter import geometry_to_list from apps.data.road_segmenting.road_fetcher import vegnet_to_geojson from apps.data.road_segmenting.road_filter import filter_road", "error_message) def test_road_filter(self): \"\"\" The road_filter function should return a string, otherwise segmentation", "prev_segment[\"the_geom\"][\"coordinates\"][end_coordinate], \"Segments are not correctly chained\") def test_split_segment_negative_length(self): \"\"\" No road segments should", "dict :return: Nothing \"\"\" error_message = \"Not all elements in the split list", "a list\") def test_calculate_road_length(self): \"\"\" The total distance of the segmented road should", "bit funky and the road_net does not stay 
filtered after setUpClass is run,", "geometry_to_list(road[\"the_geom\"]) return road class TestSegmenting(unittest.TestCase): @classmethod def setUpClass(cls): cls.kommune = 5001 cls.vegref =", "return road class TestSegmenting(unittest.TestCase): @classmethod def setUpClass(cls): cls.kommune = 5001 cls.vegref = \"kg\"", "split segments :return: Nothing \"\"\" for road in self.road_net: road = convert(road) self.assertIsInstance(road[\"the_geom\"],", "in self.road_net: road = filter_road(road) self.assertIsInstance(road[\"the_geom\"], str, \"road_filter should turn geometry into a", "road_segmented[i] prev_segment = road_segmented[i-1] end_coordinate = len(prev_segment[\"the_geom\"][\"coordinates\"])-1 self.assertEqual(curr_segment[\"the_geom\"][\"coordinates\"][0], prev_segment[\"the_geom\"][\"coordinates\"][end_coordinate], \"Segments are not correctly", "def convert(road): road = filter_road(road) road[\"the_geom\"] = geometry_to_list(road[\"the_geom\"]) return road class TestSegmenting(unittest.TestCase): @classmethod", "\"dictionary\") self.assertIsInstance(road[\"the_geom\"][\"coordinates\"], list, \"geometry_to_list should return a turn the \" \"coordinates into a", "= split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) for i in range(1, len(road_segmented)): curr_segment = road_segmented[i]", "\"Segments are not correctly chained\") def test_split_segment_negative_length(self): \"\"\" No road segments should have", "road network :return: Nothing \"\"\" for road in self.road_net: road = convert(road) coordinates_original", "len(road_segmented) if road_distance < self.max_segment_distance: self.assertTrue(road_segmented_length == 1, \"This road was segmented, but", "\"\"\" No road segments should have a negative road length :return: Nothing \"\"\"", "self.assertTrue(road_segmented_length == 1, \"This road was segmented, but should not have been.\") elif", "cls.road_net_segmented = segment_network(road_net_list, 
cls.max_segment_distance, cls.min_coordinates_length) def setUp(self): \"\"\" Needs to be here for", "Every connected segment should start with the end gps point of the previous", "+= calculate_road_length_simple(segment[\"the_geom\"][\"coordinates\"]) self.assertLess(abs(length_after - length_before), margin, \"The difference between the original \" \"length", "segmented, but should not have been.\") elif road_coords_length >= 2*self.min_coordinates_length and road_distance >", "converted_road = convert(road) road_coords_length = len(converted_road[\"the_geom\"][\"coordinates\"]) road_distance = calculate_road_length_simple(converted_road[\"the_geom\"][\"coordinates\"]) road_segmented = segment_network([filter_road(road)], self.max_segment_distance,", "convert(road) road_coords_length = len(converted_road[\"the_geom\"][\"coordinates\"]) road_distance = calculate_road_length_simple(converted_road[\"the_geom\"][\"coordinates\"]) road_segmented = segment_network([filter_road(road)], self.max_segment_distance, self.min_coordinates_length) road_segmented_length", "segment_network([filter_road(road)], self.max_segment_distance, self.min_coordinates_length) road_segmented_length = len(road_segmented) if road_distance < self.max_segment_distance: self.assertTrue(road_segmented_length == 1,", "def setUp(self): \"\"\" Needs to be here for the tests to run \"\"\"", "road_net_list = [] for road in cls.road_net: road_net_list.append(filter_road(road)) cls.road_net_segmented = segment_network(road_net_list, cls.max_segment_distance, cls.min_coordinates_length)", "not return a list\") def test_road_segmenter_list_elements(self): \"\"\" Every element in the split segments", "coordinates_segmented = [] for segment in road_segmented: coordinates_segmented.extend(segment[\"the_geom\"][\"coordinates\"]) for coordinate in coordinates_original: self.assertTrue(coordinate", "connected segment should start with the end gps point of the previous segment", "otherwise segmentation will 
crash in later stages :return: Nothing \"\"\" for road in", "segments should be a dict :return: Nothing \"\"\" error_message = \"Not all elements", "Apparently the setUpClass is a bit funky and the road_net does not stay", "import calculate_road_length_simple from api.segmenter.road_segmenter import segment_network, split_segment def convert(road): road = filter_road(road) road[\"the_geom\"]", "= vegnet_to_geojson(cls.kommune, cls.vegref) cls.count, cls.road_net = network[0], network[1][\"features\"] # Apparently the setUpClass is", "= \"kg\" cls.max_segment_distance = MAX_SEGMENT_LENGTH cls.min_coordinates_length = MIN_COORDINATES_LENGTH network = vegnet_to_geojson(cls.kommune, cls.vegref) cls.count,", "Nothing \"\"\" self.assertIsInstance(self.road_net_segmented, list, \"The road segmenter did not return a list\") def", "\"road_filter should turn geometry into a string\") def test_geometry_conversion(self): \"\"\" The geometry_to_list function", "\"\"\" The road_filter function should return a string, otherwise segmentation will crash in", "= filter_road(road) self.assertIsInstance(road[\"the_geom\"], str, \"road_filter should turn geometry into a string\") def test_geometry_conversion(self):", "be here for the tests to run \"\"\" pass def test_road_segmenter_list(self): \"\"\" The", "convert(road) coordinates_original = road[\"the_geom\"][\"coordinates\"] road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) coordinates_segmented = []", "segmenter should only run on segments that are over the limit in length,", "\" + str(self.min_coordinates_length) + \" GPS coordinates\" for segment in self.road_net_segmented: # coordinates", "for coordinate in coordinates_original: self.assertTrue(coordinate in coordinates_segmented, \"Missing coordinate after segmenting\") def test_over_and_undersegmenting(self):", "other words the segmented road should still be only one segment :return: Nothing", "should still be present after 
segmenting road network :return: Nothing \"\"\" for road", "0 for segment in road_segmented: length_after += calculate_road_length_simple(segment[\"the_geom\"][\"coordinates\"]) self.assertLess(abs(length_after - length_before), margin, \"The", "length, it should never segment something shorter than that. In other words the", "filtered after setUpClass is run, # so instead it is done in each", "after segmenting\") def test_over_and_undersegmenting(self): \"\"\" The segmenter should only run on segments that", ":return: Nothing \"\"\" self.assertIsInstance(self.road_net_segmented, list, \"The road segmenter did not return a list\")", "should be similar to the length before segmentation, within a margin given by", "of at least 1 meter\") def test_duplicate_segments(self): \"\"\" Test if there are multiple", "in later stages :return: Nothing \"\"\" for road in self.road_net: road = filter_road(road)", "in self.road_net: road = convert(road) road_segmented = split_segment(road, self.max_segment_distance, [], self.min_coordinates_length) for i", "should return a string, otherwise segmentation will crash in later stages :return: Nothing", "> 1, (\"This road should have been segmented, but was not. 
\" \"Stretchdistance:\",", "the segmenter can't split segments :return: Nothing \"\"\" for road in self.road_net: road", "= convert(road) self.assertIsInstance(road[\"the_geom\"], dict, \"geometry_to_list should return a \" \"dictionary\") self.assertIsInstance(road[\"the_geom\"][\"coordinates\"], list, \"geometry_to_list", "to be here for the tests to run \"\"\" pass def test_road_segmenter_list(self): \"\"\"", "list\") def test_road_segmenter_list_elements(self): \"\"\" Every element in the split segments should be a", "margin = 3 for road in self.road_net: road = convert(road) length_before = calculate_road_length_simple(road[\"the_geom\"][\"coordinates\"])", "road class TestSegmenting(unittest.TestCase): @classmethod def setUpClass(cls): cls.kommune = 5001 cls.vegref = \"kg\" cls.max_segment_distance", "= len(self.road_net_segmented)-1 for i in range(length): road = self.road_net_segmented[i][\"the_geom\"] for x in range(i+1,", "in self.road_net: road = convert(road) self.assertIsInstance(road[\"the_geom\"], dict, \"geometry_to_list should return a \" \"dictionary\")", "api.segmenter.calculate_distance import calculate_road_length_simple from api.segmenter.road_segmenter import segment_network, split_segment def convert(road): road = filter_road(road)", "difference between the original \" \"length and the segmented length is \" \"too", "\"\"\" Every connected segment should start with the end gps point of the", "limit in length, it should never segment something shorter than that. 
In other", "is \" \"too large\") def test_split_segment_chaining(self): \"\"\" Every connected segment should start with", "= road_segmented[i] prev_segment = road_segmented[i-1] end_coordinate = len(prev_segment[\"the_geom\"][\"coordinates\"])-1 self.assertEqual(curr_segment[\"the_geom\"][\"coordinates\"][0], prev_segment[\"the_geom\"][\"coordinates\"][end_coordinate], \"Segments are not", "run, # so instead it is done in each test function it is", "count of coordinates coordinates_amount = segment[\"the_geom\"].count(\",\") self.assertGreaterEqual(coordinates_amount+1, self.min_coordinates_length, error_message) def test_road_filter(self): \"\"\" The", "len(road_segmented)): curr_segment = road_segmented[i] prev_segment = road_segmented[i-1] end_coordinate = len(prev_segment[\"the_geom\"][\"coordinates\"])-1 self.assertEqual(curr_segment[\"the_geom\"][\"coordinates\"][0], prev_segment[\"the_geom\"][\"coordinates\"][end_coordinate], \"Segments", "network :return: Nothing \"\"\" for road in self.road_net: road = convert(road) coordinates_original =", "\"\"\" error_message = \"Segment has less than \" + str(self.min_coordinates_length) + \" GPS", "\"\"\" All original coordinates should still be present after segmenting road network :return:", "= self.road_net_segmented[i][\"the_geom\"] for x in range(i+1, length): other_road = self.road_net_segmented[x][\"the_geom\"] self.assertNotEqual(road, other_road, \"Duplicate", "elif road_coords_length >= 2*self.min_coordinates_length and road_distance > self.max_segment_distance: self.assertTrue(road_segmented_length > 1, (\"This road", "self.road_net: road = filter_road(road) self.assertIsInstance(road[\"the_geom\"], str, \"road_filter should turn geometry into a string\")", "def test_calculate_road_length(self): \"\"\" The total distance of the segmented road should be similar", "self.road_net_segmented: self.assertGreater(segment[\"stretchdistance\"], 0, \"Stretchdistance must be of at least 1 meter\") def 
test_duplicate_segments(self):", "given by the variable \"margin\" :return: Nothing \"\"\" margin = 3 for road", "= \"Segment has less than \" + str(self.min_coordinates_length) + \" GPS coordinates\" for", "in self.road_net: i += 1 converted_road = convert(road) road_coords_length = len(converted_road[\"the_geom\"][\"coordinates\"]) road_distance =", "dict, error_message) def test_split_segment_geometry_len(self): \"\"\" Given a list of roads segments, the split", "self.road_net_segmented[x][\"the_geom\"] self.assertNotEqual(road, other_road, \"Duplicate segment geometry coordinates\") def test_missing_coordinates(self): \"\"\" All original coordinates", "in the split list are of type dict \\n\" for segment in self.road_net_segmented:", "# Apparently the setUpClass is a bit funky and the road_net does not", "Nothing \"\"\" margin = 3 for road in self.road_net: road = convert(road) length_before", "network = vegnet_to_geojson(cls.kommune, cls.vegref) cls.count, cls.road_net = network[0], network[1][\"features\"] # Apparently the setUpClass", "within a margin given by the variable \"margin\" :return: Nothing \"\"\" margin =", "in the split segments should be a dict :return: Nothing \"\"\" error_message =", "segment[\"the_geom\"].count(\",\") self.assertGreaterEqual(coordinates_amount+1, self.min_coordinates_length, error_message) def test_road_filter(self): \"\"\" The road_filter function should return a", "still be present after segmenting road network :return: Nothing \"\"\" for road in", "road in self.road_net: road = convert(road) self.assertIsInstance(road[\"the_geom\"], dict, \"geometry_to_list should return a \"", "convert(road): road = filter_road(road) road[\"the_geom\"] = geometry_to_list(road[\"the_geom\"]) return road class TestSegmenting(unittest.TestCase): @classmethod def", ":return: Nothing \"\"\" for segment in self.road_net_segmented: self.assertGreater(segment[\"stretchdistance\"], 0, \"Stretchdistance must be of", "\"kg\" cls.max_segment_distance 
= MAX_SEGMENT_LENGTH cls.min_coordinates_length = MIN_COORDINATES_LENGTH network = vegnet_to_geojson(cls.kommune, cls.vegref) cls.count, cls.road_net", "MAX_SEGMENT_LENGTH cls.min_coordinates_length = MIN_COORDINATES_LENGTH network = vegnet_to_geojson(cls.kommune, cls.vegref) cls.count, cls.road_net = network[0], network[1][\"features\"]", "the split segments should always have a length of 2 or more :return:", "done in each test function it is needed instead of here. road_net_list =" ]
[ "group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") presentation_type_uri: str = betterproto.string_field(10) valid_from_utc: int =", "def unregister_issuer( self, *, did_uri: str = \"\", x509_cert: str = \"\", credential_type_uri:", "\"CheckIssuerStatusResponse\": request = CheckIssuerStatusRequest() request.governance_framework_uri = governance_framework_uri if did_uri: request.did_uri = did_uri if", "valid_until_utc: int = 0, governance_framework_uri: str = \"\", ) -> \"RegisterIssuerResponse\": request =", "stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = { \"governance_framework_uri\": request.governance_framework_uri,", "await self.search_registry(**request_kwargs) await stream.send_message(response) async def __rpc_register_issuer(self, stream: grpclib.server.Stream) -> None: request =", "request = SearchRegistryRequest() request.query = query request.continuation_token = continuation_token if options is not", "did_uri: str, x509_cert: str, presentation_type_uri: str, ) -> \"CheckVerifierStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def", "str = betterproto.string_field(10) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class UnregisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\"", "\"\", presentation_type_uri: str = \"\", valid_from_utc: int = 0, valid_until_utc: int = 0,", "governance_framework_uri: str = betterproto.string_field(1) query: str = betterproto.string_field(2) @dataclass(eq=False, repr=False) class FetchDataResponse(betterproto.Message): response_json:", "query request.continuation_token = continuation_token if options is not None: request.options = options return", "async def add_framework( self, *, governance_framework: \"GovernanceFramework\" = None ) -> \"AddFrameworkResponse\": request", "@dataclass(eq=False, repr=False) class 
RemoveFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False, repr=False) class RemoveFrameworkResponse(betterproto.Message): status:", "\"RemoveFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def search_registry( self, query: str, continuation_token: str, options: \"__common_v1__.RequestOptions\",", "= \"\", did_uri: str = \"\", x509_cert: str = \"\", credential_type_uri: str =", "governance_framework is not None: request.governance_framework = governance_framework return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/AddFramework\", request, AddFrameworkResponse,", "str, x509_cert: str, credential_type_uri: str, valid_from_utc: int, valid_until_utc: int, governance_framework_uri: str, ) ->", "credential_type_uri request.valid_from_utc = valid_from_utc request.valid_until_utc = valid_until_utc request.governance_framework_uri = governance_framework_uri return await self._unary_unary(", ") -> \"RemoveFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def search_registry( self, query: str, continuation_token: str,", "= betterproto.string_field(3, group=\"member\") credential_type_uri: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class CheckIssuerStatusResponse(betterproto.Message): status: \"RegistrationStatus\"", "\"RegisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_verifier( self, did_uri: str, x509_cert: str, presentation_type_uri: str,", "async def remove_framework( self, governance_framework: \"GovernanceFramework\" ) -> \"RemoveFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def", "request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri request.valid_from_utc =", "str = betterproto.string_field(1) 
has_more_results: bool = betterproto.bool_field(2) continuation_token: str = betterproto.string_field(3) class TrustRegistryStub(betterproto.ServiceStub):", "presentation_type_uri: str = betterproto.string_field(10) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class UnregisterVerifierResponse(betterproto.Message): status:", "\"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False, repr=False) class RemoveFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False)", "= x509_cert request.credential_type_uri = credential_type_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\", request, CheckIssuerStatusResponse, ) async", "self, governance_framework_uri: str, query: str ) -> AsyncIterator[\"FetchDataResponse\"]: raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def __rpc_add_framework(self,", "= betterproto.string_field(3) @dataclass(eq=False, repr=False) class RegisterIssuerRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str", "\"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.unregister_issuer(**request_kwargs)", ") -> \"AddFrameworkResponse\": request = AddFrameworkRequest() if governance_framework is not None: request.governance_framework =", "request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\", request, CheckVerifierStatusResponse, )", "0, governance_framework_uri: str = \"\", ) -> \"RegisterIssuerResponse\": request = RegisterIssuerRequest() if did_uri:", "request = await stream.recv_message() request_kwargs = { 
\"governance_framework_uri\": request.governance_framework_uri, \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert,", "grpclib.const.Handler( self.__rpc_fetch_data, grpclib.const.Cardinality.UNARY_STREAM, FetchDataRequest, FetchDataResponse, ), } from ...common import v1 as __common_v1__", "str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") credential_type_uri: str = betterproto.string_field(10)", "governance_framework_uri: str, ) -> \"RegisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_verifier( self, did_uri: str,", "= betterproto.string_field(4) @dataclass(eq=False, repr=False) class CheckVerifierStatusResponse(betterproto.Message): status: \"RegistrationStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class", "request.presentation_type_uri, } response = await self.check_verifier_status(**request_kwargs) await stream.send_message(response) async def __rpc_fetch_data(self, stream: grpclib.server.Stream)", "= \"\", ) -> \"UnregisterVerifierResponse\": request = UnregisterVerifierRequest() if did_uri: request.did_uri = did_uri", "None, ) -> \"SearchRegistryResponse\": request = SearchRegistryRequest() request.query = query request.continuation_token = continuation_token", ") -> \"CheckIssuerStatusResponse\": request = CheckIssuerStatusRequest() request.governance_framework_uri = governance_framework_uri if did_uri: request.did_uri =", "= \"\", ) -> \"RegisterVerifierResponse\": request = RegisterVerifierRequest() if did_uri: request.did_uri = did_uri", "= betterproto.enum_field(1) @dataclass(eq=False, repr=False) class SearchRegistryRequest(betterproto.Message): query: str = betterproto.string_field(1) continuation_token: str =", "UnregisterIssuerRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") 
credential_type_uri: str", "valid_from_utc: int, valid_until_utc: int, governance_framework_uri: str, ) -> \"RegisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def", "\"CheckIssuerStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def check_verifier_status( self, governance_framework_uri: str, did_uri: str, x509_cert: str,", "def unregister_verifier( self, *, did_uri: str = \"\", x509_cert: str = \"\", presentation_type_uri:", "x509_cert: str = \"\", presentation_type_uri: str = \"\", valid_from_utc: int = 0, valid_until_utc:", "stream, request_kwargs, ) def __mapping__(self) -> Dict[str, grpclib.const.Handler]: return { \"/services.trustregistry.v1.TrustRegistry/AddFramework\": grpclib.const.Handler( self.__rpc_add_framework,", "stream.recv_message() request_kwargs = { \"governance_framework\": request.governance_framework, } response = await self.remove_framework(**request_kwargs) await stream.send_message(response)", "async def register_verifier( self, *, did_uri: str = \"\", x509_cert: str = \"\",", "group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") credential_type_uri: str = betterproto.string_field(10) valid_from_utc: int =", "@dataclass(eq=False, repr=False) class AddFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class RemoveFrameworkRequest(betterproto.Message): governance_framework:", "= betterproto.string_field(1) has_more_results: bool = betterproto.bool_field(2) continuation_token: str = betterproto.string_field(3) class TrustRegistryStub(betterproto.ServiceStub): async", "status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class RemoveFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False,", 
"request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\", request, RegisterIssuerResponse, ) async def register_verifier(", "options: \"__common_v1__.RequestOptions\" = betterproto.message_field(5) @dataclass(eq=False, repr=False) class SearchRegistryResponse(betterproto.Message): items_json: str = betterproto.string_field(1) has_more:", "group=\"authority\") presentation_type_uri: str = betterproto.string_field(10) valid_from_utc: int = betterproto.uint64_field(11) valid_until_utc: int = betterproto.uint64_field(12)", "python-betterproto from dataclasses import dataclass from typing import AsyncIterator, Dict import betterproto from", "str, credential_type_uri: str, valid_from_utc: int, valid_until_utc: int, governance_framework_uri: str, ) -> \"RegisterIssuerResponse\": raise", "group=\"member\") x509_cert: str = betterproto.string_field(3, group=\"member\") credential_type_uri: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class", "= \"\", continuation_token: str = \"\", options: \"__common_v1__.RequestOptions\" = None, ) -> \"SearchRegistryResponse\":", "{ \"governance_framework_uri\": request.governance_framework_uri, \"query\": request.query, } await self._call_rpc_handler_server_stream( self.fetch_data, stream, request_kwargs, ) def", "REVOKED = 3 NOT_FOUND = 10 @dataclass(eq=False, repr=False) class AddFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\" =", "grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def remove_framework( self, governance_framework: \"GovernanceFramework\" ) -> \"RemoveFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async", "= query request.continuation_token = continuation_token if options is not None: request.options = options", ") -> \"RegisterIssuerResponse\": request = RegisterIssuerRequest() if did_uri: request.did_uri = did_uri if 
x509_cert:", "request.presentation_type_uri = presentation_type_uri request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\", request, UnregisterVerifierResponse, )", "response class TrustRegistryBase(ServiceBase): async def add_framework( self, governance_framework: \"GovernanceFramework\" ) -> \"AddFrameworkResponse\": raise", "response = await self.unregister_verifier(**request_kwargs) await stream.send_message(response) async def __rpc_check_issuer_status(self, stream: grpclib.server.Stream) -> None:", "\"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, } response = await self.check_verifier_status(**request_kwargs) await stream.send_message(response) async def", "await self.add_framework(**request_kwargs) await stream.send_message(response) async def __rpc_remove_framework(self, stream: grpclib.server.Stream) -> None: request =", "self.__rpc_unregister_issuer, grpclib.const.Cardinality.UNARY_UNARY, UnregisterIssuerRequest, UnregisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\": grpclib.const.Handler( self.__rpc_unregister_verifier, grpclib.const.Cardinality.UNARY_UNARY, UnregisterVerifierRequest, UnregisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\":", "x509_cert: str = betterproto.string_field(3, group=\"member\") credential_type_uri: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class CheckIssuerStatusResponse(betterproto.Message):", "governance_framework_uri: str = \"\", ) -> \"RegisterIssuerResponse\": request = RegisterIssuerRequest() if did_uri: request.did_uri", "repr=False) class CheckVerifierStatusRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) did_uri: str = betterproto.string_field(2, group=\"member\") x509_cert:", "if x509_cert: request.x509_cert = x509_cert 
request.credential_type_uri = credential_type_uri request.governance_framework_uri = governance_framework_uri return await", "= x509_cert request.credential_type_uri = credential_type_uri request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\", request,", "request = CheckVerifierStatusRequest() request.governance_framework_uri = governance_framework_uri if did_uri: request.did_uri = did_uri if x509_cert:", "RegisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class UnregisterIssuerRequest(betterproto.Message): did_uri: str = betterproto.string_field(1,", "unregister_verifier( self, *, did_uri: str = \"\", x509_cert: str = \"\", presentation_type_uri: str", "int = betterproto.uint64_field(12) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class RegisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\"", "grpclib.const.Cardinality.UNARY_UNARY, RemoveFrameworkRequest, RemoveFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\": grpclib.const.Handler( self.__rpc_search_registry, grpclib.const.Cardinality.UNARY_UNARY, SearchRegistryRequest, SearchRegistryResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\": grpclib.const.Handler(", "-> \"UnregisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def check_issuer_status( self, governance_framework_uri: str, did_uri: str, x509_cert:", "{ \"governance_framework\": request.governance_framework, } response = await self.remove_framework(**request_kwargs) await stream.send_message(response) async def __rpc_search_registry(self,", "= RegisterIssuerRequest() if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.credential_type_uri", 
"unregister_issuer( self, did_uri: str, x509_cert: str, credential_type_uri: str, governance_framework_uri: str, ) -> \"UnregisterIssuerResponse\":", "\"\", ) -> \"CheckIssuerStatusResponse\": request = CheckIssuerStatusRequest() request.governance_framework_uri = governance_framework_uri if did_uri: request.did_uri", "= None ) -> \"RemoveFrameworkResponse\": request = RemoveFrameworkRequest() if governance_framework is not None:", "RemoveFrameworkRequest() if governance_framework is not None: request.governance_framework = governance_framework return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\",", "= presentation_type_uri request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\", request, UnregisterVerifierResponse, ) async", "await self.unregister_issuer(**request_kwargs) await stream.send_message(response) async def __rpc_unregister_verifier(self, stream: grpclib.server.Stream) -> None: request =", "\"CheckVerifierStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def fetch_data( self, governance_framework_uri: str, query: str ) ->", "count: int = betterproto.int32_field(3) continuation_token: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class GovernanceFramework(betterproto.Message): governance_framework_uri:", "@dataclass(eq=False, repr=False) class CheckVerifierStatusResponse(betterproto.Message): status: \"RegistrationStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class FetchDataRequest(betterproto.Message): governance_framework_uri:", "betterproto.string_field(4) @dataclass(eq=False, repr=False) class GovernanceFramework(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) trust_registry_uri: str = betterproto.string_field(2)", "grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = { 
\"query\": request.query, \"continuation_token\":", "= betterproto.uint64_field(11) valid_until_utc: int = betterproto.uint64_field(12) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class", "str = \"\", did_uri: str = \"\", x509_cert: str = \"\", presentation_type_uri: str", "request.valid_from_utc, \"valid_until_utc\": request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.register_verifier(**request_kwargs) await stream.send_message(response) async", "stream.recv_message() request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\":", "str, presentation_type_uri: str, ) -> \"CheckVerifierStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def fetch_data( self, governance_framework_uri:", "\"credential_type_uri\": request.credential_type_uri, } response = await self.check_issuer_status(**request_kwargs) await stream.send_message(response) async def __rpc_check_verifier_status(self, stream:", "= betterproto.message_field(1) @dataclass(eq=False, repr=False) class AddFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class", "UnregisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class CheckIssuerStatusRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1)", "\"SearchRegistryResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_issuer( self, did_uri: str, x509_cert: str, credential_type_uri: str,", "\"presentation_type_uri\": request.presentation_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\": 
request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.register_verifier(**request_kwargs)", "str = \"\", ) -> \"CheckVerifierStatusResponse\": request = CheckVerifierStatusRequest() request.governance_framework_uri = governance_framework_uri if", "return { \"/services.trustregistry.v1.TrustRegistry/AddFramework\": grpclib.const.Handler( self.__rpc_add_framework, grpclib.const.Cardinality.UNARY_UNARY, AddFrameworkRequest, AddFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\": grpclib.const.Handler( self.__rpc_remove_framework, grpclib.const.Cardinality.UNARY_UNARY,", "governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\", request, UnregisterVerifierResponse, ) async def check_issuer_status( self, *,", "continuation_token if options is not None: request.options = options return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\",", "betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") credential_type_uri: str = betterproto.string_field(10) governance_framework_uri: str", "betterproto.string_field(10) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class UnregisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1)", "request.governance_framework_uri, \"query\": request.query, } await self._call_rpc_handler_server_stream( self.fetch_data, stream, request_kwargs, ) def __mapping__(self) ->", "), \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\": grpclib.const.Handler( self.__rpc_check_verifier_status, grpclib.const.Cardinality.UNARY_UNARY, CheckVerifierStatusRequest, CheckVerifierStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/FetchData\": grpclib.const.Handler( self.__rpc_fetch_data, 
grpclib.const.Cardinality.UNARY_STREAM, FetchDataRequest,", "request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.unregister_issuer(**request_kwargs) await", "= await self.check_verifier_status(**request_kwargs) await stream.send_message(response) async def __rpc_fetch_data(self, stream: grpclib.server.Stream) -> None: request", "import ServiceBase import grpclib class RegistrationStatus(betterproto.Enum): CURRENT = 0 EXPIRED = 1 TERMINATED", "= betterproto.string_field(4) @dataclass(eq=False, repr=False) class GovernanceFramework(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) trust_registry_uri: str =", "return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\", request, CheckVerifierStatusResponse, ) async def fetch_data( self, *, governance_framework_uri:", "grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = { \"governance_framework_uri\": request.governance_framework_uri, \"query\":", "continuation_token: str = \"\", options: \"__common_v1__.RequestOptions\" = None, ) -> \"SearchRegistryResponse\": request =", "\"credential_type_uri\": request.credential_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\": request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.register_issuer(**request_kwargs)", "betterproto.string_field(20) @dataclass(eq=False, repr=False) class UnregisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class CheckIssuerStatusRequest(betterproto.Message):", "check_issuer_status( self, governance_framework_uri: str, did_uri: str, x509_cert: str, credential_type_uri: str, ) -> \"CheckIssuerStatusResponse\":", "= { 
\"governance_framework_uri\": request.governance_framework_uri, \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, } response =", "10 @dataclass(eq=False, repr=False) class AddFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False, repr=False) class AddFrameworkResponse(betterproto.Message):", "\"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\": request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, } response =", "await stream.send_message(response) async def __rpc_check_issuer_status(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message()", "search_registry( self, *, query: str = \"\", continuation_token: str = \"\", options: \"__common_v1__.RequestOptions\"", "betterproto.string_field(10) valid_from_utc: int = betterproto.uint64_field(11) valid_until_utc: int = betterproto.uint64_field(12) governance_framework_uri: str = betterproto.string_field(20)", "= 3 NOT_FOUND = 10 @dataclass(eq=False, repr=False) class AddFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\" = betterproto.message_field(1)", "bool = betterproto.bool_field(2) continuation_token: str = betterproto.string_field(3) class TrustRegistryStub(betterproto.ServiceStub): async def add_framework( self,", "self, *, governance_framework: \"GovernanceFramework\" = None ) -> \"RemoveFrameworkResponse\": request = RemoveFrameworkRequest() if", "= \"\", presentation_type_uri: str = \"\", valid_from_utc: int = 0, valid_until_utc: int =", "\"governance_framework_uri\": request.governance_framework_uri, \"query\": request.query, } await self._call_rpc_handler_server_stream( self.fetch_data, stream, request_kwargs, ) def __mapping__(self)", "{ \"did_uri\": 
request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\": request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri,", "did_uri if x509_cert: request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri request.valid_from_utc = valid_from_utc request.valid_until_utc", "self, did_uri: str, x509_cert: str, presentation_type_uri: str, valid_from_utc: int, valid_until_utc: int, governance_framework_uri: str,", "FetchDataRequest() request.governance_framework_uri = governance_framework_uri request.query = query async for response in self._unary_stream( \"/services.trustregistry.v1.TrustRegistry/FetchData\",", "await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\", request, SearchRegistryResponse, ) async def register_issuer( self, *, did_uri: str", "betterproto.bool_field(2) count: int = betterproto.int32_field(3) continuation_token: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class GovernanceFramework(betterproto.Message):", "not None: request.governance_framework = governance_framework return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/AddFramework\", request, AddFrameworkResponse, ) async", "credential_type_uri: str, governance_framework_uri: str, ) -> \"UnregisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def unregister_verifier( self,", ") async def register_issuer( self, *, did_uri: str = \"\", x509_cert: str =", "x509_cert request.credential_type_uri = credential_type_uri request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\", request, UnregisterIssuerResponse,", "), \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\": grpclib.const.Handler( self.__rpc_check_issuer_status, 
grpclib.const.Cardinality.UNARY_UNARY, CheckIssuerStatusRequest, CheckIssuerStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\": grpclib.const.Handler( self.__rpc_check_verifier_status, grpclib.const.Cardinality.UNARY_UNARY, CheckVerifierStatusRequest,", "\"\", governance_framework_uri: str = \"\", ) -> \"UnregisterVerifierResponse\": request = UnregisterVerifierRequest() if did_uri:", "= \"\", governance_framework_uri: str = \"\", ) -> \"UnregisterVerifierResponse\": request = UnregisterVerifierRequest() if", "\"\", credential_type_uri: str = \"\", governance_framework_uri: str = \"\", ) -> \"UnregisterIssuerResponse\": request", "continuation_token: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class GovernanceFramework(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) trust_registry_uri:", "fetch_data( self, *, governance_framework_uri: str = \"\", query: str = \"\" ) ->", "valid_until_utc request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\", request, RegisterIssuerResponse, ) async def", "class RemoveFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class SearchRegistryRequest(betterproto.Message): query: str =", "SearchRegistryResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\": grpclib.const.Handler( self.__rpc_register_issuer, grpclib.const.Cardinality.UNARY_UNARY, RegisterIssuerRequest, RegisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\": grpclib.const.Handler( self.__rpc_register_verifier, grpclib.const.Cardinality.UNARY_UNARY,", "request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\": 
request.valid_until_utc,", "__rpc_check_verifier_status(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = { \"governance_framework_uri\":", "grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = { \"governance_framework\": request.governance_framework, }", "\"AddFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def remove_framework( self, governance_framework: \"GovernanceFramework\" ) -> \"RemoveFrameworkResponse\": raise", "= betterproto.string_field(4) @dataclass(eq=False, repr=False) class CheckIssuerStatusResponse(betterproto.Message): status: \"RegistrationStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class", "CheckVerifierStatusResponse, ) async def fetch_data( self, *, governance_framework_uri: str = \"\", query: str", "request.presentation_type_uri = presentation_type_uri request.valid_from_utc = valid_from_utc request.valid_until_utc = valid_until_utc request.governance_framework_uri = governance_framework_uri return", "request.governance_framework_uri = governance_framework_uri if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert", "self.remove_framework(**request_kwargs) await stream.send_message(response) async def __rpc_search_registry(self, stream: grpclib.server.Stream) -> None: request = await", "raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_issuer( self, did_uri: str, x509_cert: str, credential_type_uri: str, valid_from_utc:", "did_uri: str, x509_cert: str, presentation_type_uri: str, valid_from_utc: int, valid_until_utc: int, governance_framework_uri: str, )", "\"governance_framework\": request.governance_framework, } response = await self.remove_framework(**request_kwargs) await stream.send_message(response) async def __rpc_search_registry(self, stream:", "= betterproto.string_field(20) @dataclass(eq=False, repr=False) class 
RegisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class", "str, ) -> \"CheckVerifierStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def fetch_data( self, governance_framework_uri: str, query:", "stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = { \"did_uri\": request.did_uri,", "description: str = betterproto.string_field(3) @dataclass(eq=False, repr=False) class RegisterIssuerRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\")", "valid_until_utc: int = betterproto.uint64_field(12) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class RegisterIssuerResponse(betterproto.Message): status:", "-> \"RegisterIssuerResponse\": request = RegisterIssuerRequest() if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert", "credential_type_uri: str, valid_from_utc: int, valid_until_utc: int, governance_framework_uri: str, ) -> \"RegisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)", "betterproto.string_field(3, group=\"member\") credential_type_uri: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class CheckIssuerStatusResponse(betterproto.Message): status: \"RegistrationStatus\" =", "query: str = betterproto.string_field(2) @dataclass(eq=False, repr=False) class FetchDataResponse(betterproto.Message): response_json: str = betterproto.string_field(1) has_more_results:", "stream.recv_message() request_kwargs = { \"query\": request.query, \"continuation_token\": request.continuation_token, \"options\": request.options, } response =", "-> \"RemoveFrameworkResponse\": request = RemoveFrameworkRequest() if governance_framework is not None: request.governance_framework = governance_framework", "__rpc_add_framework(self, stream: grpclib.server.Stream) -> 
None: request = await stream.recv_message() request_kwargs = { \"governance_framework\":", "class FetchDataRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) query: str = betterproto.string_field(2) @dataclass(eq=False, repr=False) class", "-> \"UnregisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def unregister_verifier( self, did_uri: str, x509_cert: str, presentation_type_uri:", "\"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class UnregisterVerifierRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert:", "governance_framework return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/AddFramework\", request, AddFrameworkResponse, ) async def remove_framework( self, *,", "RegisterIssuerRequest() if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.credential_type_uri =", "= governance_framework_uri if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.credential_type_uri", "str = \"\", x509_cert: str = \"\", presentation_type_uri: str = \"\", ) ->", "{ \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\": request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri,", "betterproto.enum_field(1) @dataclass(eq=False, repr=False) class RegisterVerifierRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str =", "= betterproto.string_field(1) did_uri: str = betterproto.string_field(2, group=\"member\") x509_cert: str = betterproto.string_field(3, group=\"member\") credential_type_uri:", "\"RegisterIssuerResponse\": request = RegisterIssuerRequest() if did_uri: request.did_uri = did_uri if x509_cert: 
request.x509_cert =", "@dataclass(eq=False, repr=False) class CheckIssuerStatusResponse(betterproto.Message): status: \"RegistrationStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class CheckVerifierStatusRequest(betterproto.Message): governance_framework_uri:", "Generated by the protocol buffer compiler. DO NOT EDIT! # sources: services/trust-registry/v1/trust-registry.proto #", "str, governance_framework_uri: str, ) -> \"UnregisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def check_issuer_status( self, governance_framework_uri:", "x509_cert: str = betterproto.string_field(3, group=\"member\") presentation_type_uri: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class CheckVerifierStatusResponse(betterproto.Message):", "did_uri if x509_cert: request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri request.governance_framework_uri = governance_framework_uri return", "stream.send_message(response) async def __rpc_register_issuer(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs", "-> \"AddFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def remove_framework( self, governance_framework: \"GovernanceFramework\" ) -> \"RemoveFrameworkResponse\":", "options: \"__common_v1__.RequestOptions\" = None, ) -> \"SearchRegistryResponse\": request = SearchRegistryRequest() request.query = query", "request, CheckVerifierStatusResponse, ) async def fetch_data( self, *, governance_framework_uri: str = \"\", query:", "request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\",", ") async def remove_framework( self, *, governance_framework: \"GovernanceFramework\" = None ) -> \"RemoveFrameworkResponse\":", "None: 
request = await stream.recv_message() request_kwargs = { \"governance_framework\": request.governance_framework, } response =", "request, UnregisterVerifierResponse, ) async def check_issuer_status( self, *, governance_framework_uri: str = \"\", did_uri:", "request.x509_cert, \"credential_type_uri\": request.credential_type_uri, } response = await self.check_issuer_status(**request_kwargs) await stream.send_message(response) async def __rpc_check_verifier_status(self,", "\"\", options: \"__common_v1__.RequestOptions\" = None, ) -> \"SearchRegistryResponse\": request = SearchRegistryRequest() request.query =", "repr=False) class SearchRegistryRequest(betterproto.Message): query: str = betterproto.string_field(1) continuation_token: str = betterproto.string_field(2) options: \"__common_v1__.RequestOptions\"", "str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") presentation_type_uri: str = betterproto.string_field(10)", "= governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\", request, RegisterVerifierResponse, ) async def unregister_issuer( self,", "\"query\": request.query, \"continuation_token\": request.continuation_token, \"options\": request.options, } response = await self.search_registry(**request_kwargs) await stream.send_message(response)", "\"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, } response = await self.check_issuer_status(**request_kwargs) await stream.send_message(response) async def", "await stream.recv_message() request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"governance_framework_uri\": request.governance_framework_uri,", "self, query: str, continuation_token: str, options: \"__common_v1__.RequestOptions\", ) -> \"SearchRegistryResponse\": raise 
grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async", "governance_framework_uri if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.credential_type_uri =", "\"/services.trustregistry.v1.TrustRegistry/SearchRegistry\": grpclib.const.Handler( self.__rpc_search_registry, grpclib.const.Cardinality.UNARY_UNARY, SearchRegistryRequest, SearchRegistryResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\": grpclib.const.Handler( self.__rpc_register_issuer, grpclib.const.Cardinality.UNARY_UNARY, RegisterIssuerRequest, RegisterIssuerResponse,", "repr=False) class RegisterVerifierRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\")", "= governance_framework_uri if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri", "= betterproto.string_field(10) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class UnregisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" =", "= 2 REVOKED = 3 NOT_FOUND = 10 @dataclass(eq=False, repr=False) class AddFrameworkRequest(betterproto.Message): governance_framework:", "async def fetch_data( self, governance_framework_uri: str, query: str ) -> AsyncIterator[\"FetchDataResponse\"]: raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)", "continuation_token: str = betterproto.string_field(2) options: \"__common_v1__.RequestOptions\" = betterproto.message_field(5) @dataclass(eq=False, repr=False) class SearchRegistryResponse(betterproto.Message): items_json:", "self.__rpc_search_registry, grpclib.const.Cardinality.UNARY_UNARY, SearchRegistryRequest, SearchRegistryResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\": grpclib.const.Handler( self.__rpc_register_issuer, grpclib.const.Cardinality.UNARY_UNARY, RegisterIssuerRequest, 
RegisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\":", "= \"\", options: \"__common_v1__.RequestOptions\" = None, ) -> \"SearchRegistryResponse\": request = SearchRegistryRequest() request.query", "\"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.unregister_verifier(**request_kwargs)", "repr=False) class RegisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class RegisterVerifierRequest(betterproto.Message): did_uri: str", "betterproto.string_field(1) trust_registry_uri: str = betterproto.string_field(2) description: str = betterproto.string_field(3) @dataclass(eq=False, repr=False) class RegisterIssuerRequest(betterproto.Message):", "RegisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\": grpclib.const.Handler( self.__rpc_unregister_issuer, grpclib.const.Cardinality.UNARY_UNARY, UnregisterIssuerRequest, UnregisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\": grpclib.const.Handler( self.__rpc_unregister_verifier, grpclib.const.Cardinality.UNARY_UNARY,", "= UnregisterVerifierRequest() if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri", "int, governance_framework_uri: str, ) -> \"RegisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def unregister_issuer( self, did_uri:", "request.valid_from_utc, \"valid_until_utc\": request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.register_issuer(**request_kwargs) await stream.send_message(response) async", "did_uri: str, x509_cert: str, presentation_type_uri: str, governance_framework_uri: str, ) -> 
\"UnregisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)", ") -> AsyncIterator[\"FetchDataResponse\"]: raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def __rpc_add_framework(self, stream: grpclib.server.Stream) -> None: request", "status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class UnregisterVerifierRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\")", "), \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\": grpclib.const.Handler( self.__rpc_unregister_verifier, grpclib.const.Cardinality.UNARY_UNARY, UnregisterVerifierRequest, UnregisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\": grpclib.const.Handler( self.__rpc_check_issuer_status, grpclib.const.Cardinality.UNARY_UNARY, CheckIssuerStatusRequest,", "-> None: request = await stream.recv_message() request_kwargs = { \"query\": request.query, \"continuation_token\": request.continuation_token,", "\"/services.trustregistry.v1.TrustRegistry/AddFramework\", request, AddFrameworkResponse, ) async def remove_framework( self, *, governance_framework: \"GovernanceFramework\" = None", "\"query\": request.query, } await self._call_rpc_handler_server_stream( self.fetch_data, stream, request_kwargs, ) def __mapping__(self) -> Dict[str,", "\"\", x509_cert: str = \"\", credential_type_uri: str = \"\", valid_from_utc: int = 0,", "await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\", request, CheckVerifierStatusResponse, ) async def fetch_data( self, *, governance_framework_uri: str", "AsyncIterator[\"FetchDataResponse\"]: request = FetchDataRequest() request.governance_framework_uri = governance_framework_uri request.query = query async for response", "__rpc_register_verifier(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = { \"did_uri\":", 
"str = \"\", presentation_type_uri: str = \"\", ) -> \"CheckVerifierStatusResponse\": request = CheckVerifierStatusRequest()", "request.governance_framework_uri, } response = await self.unregister_issuer(**request_kwargs) await stream.send_message(response) async def __rpc_unregister_verifier(self, stream: grpclib.server.Stream)", "RegisterIssuerRequest, RegisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\": grpclib.const.Handler( self.__rpc_register_verifier, grpclib.const.Cardinality.UNARY_UNARY, RegisterVerifierRequest, RegisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\": grpclib.const.Handler( self.__rpc_unregister_issuer,", "\"RegisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def unregister_issuer( self, did_uri: str, x509_cert: str, credential_type_uri: str,", "= betterproto.enum_field(1) @dataclass(eq=False, repr=False) class FetchDataRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) query: str =", "request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri request.governance_framework_uri =", "grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def unregister_issuer( self, did_uri: str, x509_cert: str, credential_type_uri: str, governance_framework_uri: str,", "did_uri if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\",", "def search_registry( self, *, query: str = \"\", continuation_token: str = \"\", options:", "self, *, governance_framework: \"GovernanceFramework\" = None ) -> \"AddFrameworkResponse\": request = AddFrameworkRequest() if", ") -> \"SearchRegistryResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_issuer( self, did_uri: str, x509_cert: 
str,", "\"\", x509_cert: str = \"\", presentation_type_uri: str = \"\", ) -> \"CheckVerifierStatusResponse\": request", "request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\", request, RegisterVerifierResponse, ) async def unregister_issuer(", "betterproto.enum_field(1) @dataclass(eq=False, repr=False) class UnregisterVerifierRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str =", "repr=False) class CheckIssuerStatusRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) did_uri: str = betterproto.string_field(2, group=\"member\") x509_cert:", "request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, \"governance_framework_uri\": request.governance_framework_uri, } response", "str, continuation_token: str, options: \"__common_v1__.RequestOptions\", ) -> \"SearchRegistryResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_issuer(", "governance_framework_uri: str, did_uri: str, x509_cert: str, presentation_type_uri: str, ) -> \"CheckVerifierStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)", "__rpc_check_issuer_status(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = { \"governance_framework_uri\":", "), \"/services.trustregistry.v1.TrustRegistry/FetchData\": grpclib.const.Handler( self.__rpc_fetch_data, grpclib.const.Cardinality.UNARY_STREAM, FetchDataRequest, FetchDataResponse, ), } from ...common import v1", "await stream.send_message(response) async def __rpc_search_registry(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message()", "self, did_uri: str, x509_cert: str, credential_type_uri: str, valid_from_utc: int, valid_until_utc: int, 
governance_framework_uri: str,", "def __rpc_unregister_verifier(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = {", "stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = { \"governance_framework\": request.governance_framework,", "request_kwargs = { \"query\": request.query, \"continuation_token\": request.continuation_token, \"options\": request.options, } response = await", "async def __rpc_register_verifier(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs =", "\"/services.trustregistry.v1.TrustRegistry/FetchData\", request, FetchDataResponse, ): yield response class TrustRegistryBase(ServiceBase): async def add_framework( self, governance_framework:", "\"SearchRegistryResponse\": request = SearchRegistryRequest() request.query = query request.continuation_token = continuation_token if options is", "did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri request.valid_from_utc", "valid_until_utc request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\", request, RegisterVerifierResponse, ) async def", "presentation_type_uri: str = betterproto.string_field(10) valid_from_utc: int = betterproto.uint64_field(11) valid_until_utc: int = betterproto.uint64_field(12) governance_framework_uri:", "await stream.send_message(response) async def __rpc_register_verifier(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message()", "self.unregister_issuer(**request_kwargs) await stream.send_message(response) async def __rpc_unregister_verifier(self, stream: grpclib.server.Stream) -> None: request = await", "= betterproto.string_field(10) valid_from_utc: int = betterproto.uint64_field(11) valid_until_utc: int = betterproto.uint64_field(12) 
governance_framework_uri: str =", "= query async for response in self._unary_stream( \"/services.trustregistry.v1.TrustRegistry/FetchData\", request, FetchDataResponse, ): yield response", "class UnregisterIssuerRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") credential_type_uri:", "= 0, valid_until_utc: int = 0, governance_framework_uri: str = \"\", ) -> \"RegisterIssuerResponse\":", "buffer compiler. DO NOT EDIT! # sources: services/trust-registry/v1/trust-registry.proto # plugin: python-betterproto from dataclasses", "repr=False) class RegisterIssuerRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\")", "str, ) -> \"RegisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_verifier( self, did_uri: str, x509_cert:", "self.register_verifier(**request_kwargs) await stream.send_message(response) async def __rpc_unregister_issuer(self, stream: grpclib.server.Stream) -> None: request = await", "grpclib.const.Handler( self.__rpc_unregister_verifier, grpclib.const.Cardinality.UNARY_UNARY, UnregisterVerifierRequest, UnregisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\": grpclib.const.Handler( self.__rpc_check_issuer_status, grpclib.const.Cardinality.UNARY_UNARY, CheckIssuerStatusRequest, CheckIssuerStatusResponse, ),", "stream.send_message(response) async def __rpc_check_issuer_status(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs", "= betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") credential_type_uri: str = betterproto.string_field(10) valid_from_utc:", "-> None: request = await stream.recv_message() request_kwargs = { \"governance_framework_uri\": 
request.governance_framework_uri, \"did_uri\": request.did_uri,", "continuation_token: str, options: \"__common_v1__.RequestOptions\", ) -> \"SearchRegistryResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_issuer( self,", "credential_type_uri: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class CheckIssuerStatusResponse(betterproto.Message): status: \"RegistrationStatus\" = betterproto.enum_field(1) @dataclass(eq=False,", "\"__common_v1__.RequestOptions\" = None, ) -> \"SearchRegistryResponse\": request = SearchRegistryRequest() request.query = query request.continuation_token", "} response = await self.add_framework(**request_kwargs) await stream.send_message(response) async def __rpc_remove_framework(self, stream: grpclib.server.Stream) ->", "str = \"\", options: \"__common_v1__.RequestOptions\" = None, ) -> \"SearchRegistryResponse\": request = SearchRegistryRequest()", "= betterproto.message_field(5) @dataclass(eq=False, repr=False) class SearchRegistryResponse(betterproto.Message): items_json: str = betterproto.string_field(1) has_more: bool =", "grpclib.const.Cardinality.UNARY_UNARY, UnregisterIssuerRequest, UnregisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\": grpclib.const.Handler( self.__rpc_unregister_verifier, grpclib.const.Cardinality.UNARY_UNARY, UnregisterVerifierRequest, UnregisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\": grpclib.const.Handler(", "did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri request.valid_from_utc", "betterproto.string_field(3, group=\"member\") presentation_type_uri: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class CheckVerifierStatusResponse(betterproto.Message): status: \"RegistrationStatus\" =", "def __rpc_search_registry(self, stream: grpclib.server.Stream) -> None: request = 
await stream.recv_message() request_kwargs = {", "request.credential_type_uri, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.unregister_issuer(**request_kwargs) await stream.send_message(response) async def __rpc_unregister_verifier(self,", "for response in self._unary_stream( \"/services.trustregistry.v1.TrustRegistry/FetchData\", request, FetchDataResponse, ): yield response class TrustRegistryBase(ServiceBase): async", "FetchDataResponse, ): yield response class TrustRegistryBase(ServiceBase): async def add_framework( self, governance_framework: \"GovernanceFramework\" )", "request, FetchDataResponse, ): yield response class TrustRegistryBase(ServiceBase): async def add_framework( self, governance_framework: \"GovernanceFramework\"", "# sources: services/trust-registry/v1/trust-registry.proto # plugin: python-betterproto from dataclasses import dataclass from typing import", "request.governance_framework = governance_framework return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\", request, RemoveFrameworkResponse, ) async def search_registry(", "request.options = options return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\", request, SearchRegistryResponse, ) async def register_issuer(", "x509_cert: str = betterproto.string_field(2, group=\"authority\") presentation_type_uri: str = betterproto.string_field(10) governance_framework_uri: str = betterproto.string_field(20)", "betterproto.grpc.grpclib_server import ServiceBase import grpclib class RegistrationStatus(betterproto.Enum): CURRENT = 0 EXPIRED = 1", "= betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") presentation_type_uri: str = betterproto.string_field(10) valid_from_utc:", "betterproto.enum_field(1) @dataclass(eq=False, repr=False) class FetchDataRequest(betterproto.Message): governance_framework_uri: str = 
betterproto.string_field(1) query: str = betterproto.string_field(2)", ") async def unregister_issuer( self, *, did_uri: str = \"\", x509_cert: str =", "= betterproto.string_field(20) @dataclass(eq=False, repr=False) class UnregisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class", "= await self.check_issuer_status(**request_kwargs) await stream.send_message(response) async def __rpc_check_verifier_status(self, stream: grpclib.server.Stream) -> None: request", "int = 0, governance_framework_uri: str = \"\", ) -> \"RegisterIssuerResponse\": request = RegisterIssuerRequest()", "\"governance_framework_uri\": request.governance_framework_uri, } response = await self.unregister_issuer(**request_kwargs) await stream.send_message(response) async def __rpc_unregister_verifier(self, stream:", "= \"\", ) -> \"CheckIssuerStatusResponse\": request = CheckIssuerStatusRequest() request.governance_framework_uri = governance_framework_uri if did_uri:", "NOT_FOUND = 10 @dataclass(eq=False, repr=False) class AddFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False, repr=False)", "str ) -> AsyncIterator[\"FetchDataResponse\"]: raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def __rpc_add_framework(self, stream: grpclib.server.Stream) -> None:", "__rpc_search_registry(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = { \"query\":", "grpclib.const.Handler( self.__rpc_register_issuer, grpclib.const.Cardinality.UNARY_UNARY, RegisterIssuerRequest, RegisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\": grpclib.const.Handler( self.__rpc_register_verifier, grpclib.const.Cardinality.UNARY_UNARY, RegisterVerifierRequest, RegisterVerifierResponse, ),", "-> \"CheckVerifierStatusResponse\": raise 
grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def fetch_data( self, governance_framework_uri: str, query: str )", "\"AddFrameworkResponse\": request = AddFrameworkRequest() if governance_framework is not None: request.governance_framework = governance_framework return", "await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\", request, UnregisterIssuerResponse, ) async def unregister_verifier( self, *, did_uri: str", "governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class RegisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False,", "request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\": request.valid_until_utc,", "self.__rpc_unregister_verifier, grpclib.const.Cardinality.UNARY_UNARY, UnregisterVerifierRequest, UnregisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\": grpclib.const.Handler( self.__rpc_check_issuer_status, grpclib.const.Cardinality.UNARY_UNARY, CheckIssuerStatusRequest, CheckIssuerStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\":", "request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\": request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, } response", "repr=False) class CheckIssuerStatusResponse(betterproto.Message): status: \"RegistrationStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class CheckVerifierStatusRequest(betterproto.Message): governance_framework_uri: str", "await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\", request, RegisterVerifierResponse, ) 
async def unregister_issuer( self, *, did_uri: str", "\"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class RemoveFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False, repr=False)", "str = betterproto.string_field(3, group=\"member\") credential_type_uri: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class CheckIssuerStatusResponse(betterproto.Message): status:", "DO NOT EDIT! # sources: services/trust-registry/v1/trust-registry.proto # plugin: python-betterproto from dataclasses import dataclass", "str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class UnregisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False)", "request, RegisterIssuerResponse, ) async def register_verifier( self, *, did_uri: str = \"\", x509_cert:", "None: request.options = options return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\", request, SearchRegistryResponse, ) async def", "def unregister_verifier( self, did_uri: str, x509_cert: str, presentation_type_uri: str, governance_framework_uri: str, ) ->", "request.credential_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\": request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.register_issuer(**request_kwargs) await", "async def register_issuer( self, did_uri: str, x509_cert: str, credential_type_uri: str, valid_from_utc: int, valid_until_utc:", "\"GovernanceFramework\" = None ) -> \"AddFrameworkResponse\": request = AddFrameworkRequest() if governance_framework is not", "await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\", request, CheckIssuerStatusResponse, ) async def check_verifier_status( self, *, governance_framework_uri: 
str", "stream.recv_message() request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\":", "credential_type_uri request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\", request, UnregisterIssuerResponse, ) async def", "x509_cert: request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\", request, CheckVerifierStatusResponse,", "self.__rpc_check_verifier_status, grpclib.const.Cardinality.UNARY_UNARY, CheckVerifierStatusRequest, CheckVerifierStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/FetchData\": grpclib.const.Handler( self.__rpc_fetch_data, grpclib.const.Cardinality.UNARY_STREAM, FetchDataRequest, FetchDataResponse, ), }", "credential_type_uri: str = betterproto.string_field(10) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class UnregisterIssuerResponse(betterproto.Message): status:", "\"\", did_uri: str = \"\", x509_cert: str = \"\", credential_type_uri: str = \"\",", "\"\", continuation_token: str = \"\", options: \"__common_v1__.RequestOptions\" = None, ) -> \"SearchRegistryResponse\": request", "betterproto.string_field(2) @dataclass(eq=False, repr=False) class FetchDataResponse(betterproto.Message): response_json: str = betterproto.string_field(1) has_more_results: bool = betterproto.bool_field(2)", "AddFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class RemoveFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\" = betterproto.message_field(1)", "str = \"\", query: str = \"\" ) -> 
AsyncIterator[\"FetchDataResponse\"]: request = FetchDataRequest()", "\"\", x509_cert: str = \"\", presentation_type_uri: str = \"\", governance_framework_uri: str = \"\",", "\"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\", request, UnregisterVerifierResponse, ) async def check_issuer_status( self, *, governance_framework_uri: str = \"\",", "has_more_results: bool = betterproto.bool_field(2) continuation_token: str = betterproto.string_field(3) class TrustRegistryStub(betterproto.ServiceStub): async def add_framework(", "\"governance_framework_uri\": request.governance_framework_uri, } response = await self.register_verifier(**request_kwargs) await stream.send_message(response) async def __rpc_unregister_issuer(self, stream:", "await stream.send_message(response) async def __rpc_unregister_verifier(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message()", "request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri request.valid_from_utc = valid_from_utc request.valid_until_utc = valid_until_utc request.governance_framework_uri", "def check_issuer_status( self, governance_framework_uri: str, did_uri: str, x509_cert: str, credential_type_uri: str, ) ->", "has_more: bool = betterproto.bool_field(2) count: int = betterproto.int32_field(3) continuation_token: str = betterproto.string_field(4) @dataclass(eq=False,", "async def register_verifier( self, did_uri: str, x509_cert: str, presentation_type_uri: str, valid_from_utc: int, valid_until_utc:", "int, valid_until_utc: int, governance_framework_uri: str, ) -> \"RegisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def unregister_issuer(", "= betterproto.string_field(2) options: \"__common_v1__.RequestOptions\" = betterproto.message_field(5) @dataclass(eq=False, repr=False) class SearchRegistryResponse(betterproto.Message): items_json: str =", "if governance_framework is not None: request.governance_framework = 
governance_framework return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\", request,", "governance_framework_uri: str, ) -> \"RegisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def unregister_issuer( self, did_uri: str,", "class UnregisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class CheckIssuerStatusRequest(betterproto.Message): governance_framework_uri: str =", "str = \"\", presentation_type_uri: str = \"\", valid_from_utc: int = 0, valid_until_utc: int", "= governance_framework_uri request.query = query async for response in self._unary_stream( \"/services.trustregistry.v1.TrustRegistry/FetchData\", request, FetchDataResponse,", "\"options\": request.options, } response = await self.search_registry(**request_kwargs) await stream.send_message(response) async def __rpc_register_issuer(self, stream:", "register_verifier( self, *, did_uri: str = \"\", x509_cert: str = \"\", presentation_type_uri: str", "return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\", request, SearchRegistryResponse, ) async def register_issuer( self, *, did_uri:", "await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/AddFramework\", request, AddFrameworkResponse, ) async def remove_framework( self, *, governance_framework: \"GovernanceFramework\"", ") async def register_verifier( self, *, did_uri: str = \"\", x509_cert: str =", "SearchRegistryRequest, SearchRegistryResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\": grpclib.const.Handler( self.__rpc_register_issuer, grpclib.const.Cardinality.UNARY_UNARY, RegisterIssuerRequest, RegisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\": grpclib.const.Handler( self.__rpc_register_verifier,", ") -> \"RegisterVerifierResponse\": raise 
grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def unregister_issuer( self, did_uri: str, x509_cert: str,", "request_kwargs = { \"governance_framework_uri\": request.governance_framework_uri, \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, } response", "NOT EDIT! # sources: services/trust-registry/v1/trust-registry.proto # plugin: python-betterproto from dataclasses import dataclass from", "request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\", request, UnregisterIssuerResponse, ) async def unregister_verifier(", "\"__common_v1__.RequestOptions\" = betterproto.message_field(5) @dataclass(eq=False, repr=False) class SearchRegistryResponse(betterproto.Message): items_json: str = betterproto.string_field(1) has_more: bool", "\"GovernanceFramework\" ) -> \"AddFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def remove_framework( self, governance_framework: \"GovernanceFramework\" )", "int = betterproto.uint64_field(12) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class RegisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\"", "repr=False) class RegisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class UnregisterIssuerRequest(betterproto.Message): did_uri: str", "did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri request.governance_framework_uri", "trust_registry_uri: str = betterproto.string_field(2) description: str = betterproto.string_field(3) @dataclass(eq=False, repr=False) class RegisterIssuerRequest(betterproto.Message): did_uri:", "by the protocol buffer compiler. DO NOT EDIT! 
# sources: services/trust-registry/v1/trust-registry.proto # plugin:", "governance_framework_uri: str = \"\", query: str = \"\" ) -> AsyncIterator[\"FetchDataResponse\"]: request =", "query: str = \"\", continuation_token: str = \"\", options: \"__common_v1__.RequestOptions\" = None, )", "= 10 @dataclass(eq=False, repr=False) class AddFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False, repr=False) class", "self.__rpc_check_issuer_status, grpclib.const.Cardinality.UNARY_UNARY, CheckIssuerStatusRequest, CheckIssuerStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\": grpclib.const.Handler( self.__rpc_check_verifier_status, grpclib.const.Cardinality.UNARY_UNARY, CheckVerifierStatusRequest, CheckVerifierStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/FetchData\":", "= betterproto.string_field(2) @dataclass(eq=False, repr=False) class FetchDataResponse(betterproto.Message): response_json: str = betterproto.string_field(1) has_more_results: bool =", "grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\":", "class CheckIssuerStatusRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) did_uri: str = betterproto.string_field(2, group=\"member\") x509_cert: str", "= betterproto.enum_field(1) @dataclass(eq=False, repr=False) class CheckVerifierStatusRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) did_uri: str =", "betterproto.string_field(1) did_uri: str = betterproto.string_field(2, group=\"member\") x509_cert: str = betterproto.string_field(3, group=\"member\") presentation_type_uri: str", "\"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\": 
request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, }", "str = betterproto.string_field(2, group=\"authority\") credential_type_uri: str = betterproto.string_field(10) valid_from_utc: int = betterproto.uint64_field(11) valid_until_utc:", "# plugin: python-betterproto from dataclasses import dataclass from typing import AsyncIterator, Dict import", "str = betterproto.string_field(1) did_uri: str = betterproto.string_field(2, group=\"member\") x509_cert: str = betterproto.string_field(3, group=\"member\")", "betterproto.string_field(1) has_more: bool = betterproto.bool_field(2) count: int = betterproto.int32_field(3) continuation_token: str = betterproto.string_field(4)", "grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def fetch_data( self, governance_framework_uri: str, query: str ) -> AsyncIterator[\"FetchDataResponse\"]: raise", "\"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.unregister_verifier(**request_kwargs) await stream.send_message(response)", "query: str, continuation_token: str, options: \"__common_v1__.RequestOptions\", ) -> \"SearchRegistryResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def", "= \"\", credential_type_uri: str = \"\", ) -> \"CheckIssuerStatusResponse\": request = CheckIssuerStatusRequest() request.governance_framework_uri", "@dataclass(eq=False, repr=False) class UnregisterIssuerRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2,", "did_uri: str, x509_cert: str, credential_type_uri: str, valid_from_utc: int, valid_until_utc: int, governance_framework_uri: str, )", "self.fetch_data, stream, request_kwargs, ) def __mapping__(self) -> Dict[str, grpclib.const.Handler]: return { \"/services.trustregistry.v1.TrustRegistry/AddFramework\": 
grpclib.const.Handler(", "class TrustRegistryBase(ServiceBase): async def add_framework( self, governance_framework: \"GovernanceFramework\" ) -> \"AddFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)", "response = await self.search_registry(**request_kwargs) await stream.send_message(response) async def __rpc_register_issuer(self, stream: grpclib.server.Stream) -> None:", "governance_framework_uri: str, did_uri: str, x509_cert: str, credential_type_uri: str, ) -> \"CheckIssuerStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)", "request.governance_framework_uri, } response = await self.unregister_verifier(**request_kwargs) await stream.send_message(response) async def __rpc_check_issuer_status(self, stream: grpclib.server.Stream)", "betterproto.string_field(2, group=\"authority\") credential_type_uri: str = betterproto.string_field(10) valid_from_utc: int = betterproto.uint64_field(11) valid_until_utc: int =", "raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def __rpc_add_framework(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message()", "= betterproto.enum_field(1) @dataclass(eq=False, repr=False) class RemoveFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False, repr=False) class", "*, did_uri: str = \"\", x509_cert: str = \"\", credential_type_uri: str = \"\",", "grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_issuer( self, did_uri: str, x509_cert: str, credential_type_uri: str, valid_from_utc: int,", "= await stream.recv_message() request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"governance_framework_uri\":", "self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\", request, CheckVerifierStatusResponse, ) async def fetch_data( self, *, 
governance_framework_uri: str =", "= RegisterVerifierRequest() if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri", "betterproto from betterproto.grpc.grpclib_server import ServiceBase import grpclib class RegistrationStatus(betterproto.Enum): CURRENT = 0 EXPIRED", "{ \"/services.trustregistry.v1.TrustRegistry/AddFramework\": grpclib.const.Handler( self.__rpc_add_framework, grpclib.const.Cardinality.UNARY_UNARY, AddFrameworkRequest, AddFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\": grpclib.const.Handler( self.__rpc_remove_framework, grpclib.const.Cardinality.UNARY_UNARY, RemoveFrameworkRequest,", "__rpc_remove_framework(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = { \"governance_framework\":", ") -> \"RegisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_verifier( self, did_uri: str, x509_cert: str,", "async def __rpc_check_issuer_status(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs =", "= 0 EXPIRED = 1 TERMINATED = 2 REVOKED = 3 NOT_FOUND =", "\"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\": request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, }", "str, credential_type_uri: str, ) -> \"CheckIssuerStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def check_verifier_status( self, governance_framework_uri:", "), \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\": grpclib.const.Handler( self.__rpc_remove_framework, grpclib.const.Cardinality.UNARY_UNARY, RemoveFrameworkRequest, RemoveFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\": grpclib.const.Handler( 
self.__rpc_search_registry, grpclib.const.Cardinality.UNARY_UNARY, SearchRegistryRequest,", "request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.register_verifier(**request_kwargs) await stream.send_message(response) async def __rpc_unregister_issuer(self,", "bool = betterproto.bool_field(2) count: int = betterproto.int32_field(3) continuation_token: str = betterproto.string_field(4) @dataclass(eq=False, repr=False)", "request.continuation_token, \"options\": request.options, } response = await self.search_registry(**request_kwargs) await stream.send_message(response) async def __rpc_register_issuer(self,", "= \"\", governance_framework_uri: str = \"\", ) -> \"UnregisterIssuerResponse\": request = UnregisterIssuerRequest() if", "betterproto.int32_field(3) continuation_token: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class GovernanceFramework(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1)", "CheckVerifierStatusResponse(betterproto.Message): status: \"RegistrationStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class FetchDataRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1)", "betterproto.string_field(2) options: \"__common_v1__.RequestOptions\" = betterproto.message_field(5) @dataclass(eq=False, repr=False) class SearchRegistryResponse(betterproto.Message): items_json: str = betterproto.string_field(1)", "= continuation_token if options is not None: request.options = options return await self._unary_unary(", "str = betterproto.string_field(3, group=\"member\") presentation_type_uri: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class CheckVerifierStatusResponse(betterproto.Message): status:", "did_uri if x509_cert: request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri return await self._unary_unary( 
\"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\",", "0, valid_until_utc: int = 0, governance_framework_uri: str = \"\", ) -> \"RegisterVerifierResponse\": request", "RegisterVerifierRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") presentation_type_uri: str", "= did_uri if x509_cert: request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri request.valid_from_utc = valid_from_utc", "def register_verifier( self, did_uri: str, x509_cert: str, presentation_type_uri: str, valid_from_utc: int, valid_until_utc: int,", "async def check_issuer_status( self, *, governance_framework_uri: str = \"\", did_uri: str = \"\",", "= credential_type_uri request.valid_from_utc = valid_from_utc request.valid_until_utc = valid_until_utc request.governance_framework_uri = governance_framework_uri return await", "request.valid_from_utc = valid_from_utc request.valid_until_utc = valid_until_utc request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\",", "= 0, governance_framework_uri: str = \"\", ) -> \"RegisterIssuerResponse\": request = RegisterIssuerRequest() if", "response = await self.register_issuer(**request_kwargs) await stream.send_message(response) async def __rpc_register_verifier(self, stream: grpclib.server.Stream) -> None:", "is not None: request.governance_framework = governance_framework return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\", request, RemoveFrameworkResponse, )", "SearchRegistryRequest(betterproto.Message): query: str = betterproto.string_field(1) continuation_token: str = betterproto.string_field(2) options: \"__common_v1__.RequestOptions\" = betterproto.message_field(5)", "= None ) -> \"AddFrameworkResponse\": request = AddFrameworkRequest() if governance_framework is not 
None:", "str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class UnregisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False)", "grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_verifier( self, did_uri: str, x509_cert: str, presentation_type_uri: str, valid_from_utc: int,", "raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def fetch_data( self, governance_framework_uri: str, query: str ) -> AsyncIterator[\"FetchDataResponse\"]:", "register_verifier( self, did_uri: str, x509_cert: str, presentation_type_uri: str, valid_from_utc: int, valid_until_utc: int, governance_framework_uri:", "AddFrameworkRequest() if governance_framework is not None: request.governance_framework = governance_framework return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/AddFramework\",", "request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, } response = await self.check_verifier_status(**request_kwargs) await stream.send_message(response) async", "presentation_type_uri: str, ) -> \"CheckVerifierStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def fetch_data( self, governance_framework_uri: str,", "class RegisterVerifierRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") presentation_type_uri:", "-> \"RegisterVerifierResponse\": request = RegisterVerifierRequest() if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert", "int, governance_framework_uri: str, ) -> \"RegisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_verifier( self, did_uri:", "str, ) -> \"CheckIssuerStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def 
check_verifier_status( self, governance_framework_uri: str, did_uri:", "= betterproto.enum_field(1) @dataclass(eq=False, repr=False) class CheckIssuerStatusRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) did_uri: str =", "x509_cert: str, presentation_type_uri: str, governance_framework_uri: str, ) -> \"UnregisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def", "did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri request.governance_framework_uri", "grpclib.const.Handler]: return { \"/services.trustregistry.v1.TrustRegistry/AddFramework\": grpclib.const.Handler( self.__rpc_add_framework, grpclib.const.Cardinality.UNARY_UNARY, AddFrameworkRequest, AddFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\": grpclib.const.Handler( self.__rpc_remove_framework,", "request, RegisterVerifierResponse, ) async def unregister_issuer( self, *, did_uri: str = \"\", x509_cert:", "def add_framework( self, *, governance_framework: \"GovernanceFramework\" = None ) -> \"AddFrameworkResponse\": request =", "request, SearchRegistryResponse, ) async def register_issuer( self, *, did_uri: str = \"\", x509_cert:", "did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") credential_type_uri: str =", "TERMINATED = 2 REVOKED = 3 NOT_FOUND = 10 @dataclass(eq=False, repr=False) class AddFrameworkRequest(betterproto.Message):", "{ \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"governance_framework_uri\": request.governance_framework_uri, } response = await", "presentation_type_uri: str = \"\", governance_framework_uri: str = \"\", ) -> \"UnregisterVerifierResponse\": request =", "return await self._unary_unary( 
\"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\", request, RegisterVerifierResponse, ) async def unregister_issuer( self, *, did_uri:", "x509_cert: str = \"\", credential_type_uri: str = \"\", ) -> \"CheckIssuerStatusResponse\": request =", "request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, } response = await self.check_issuer_status(**request_kwargs) await stream.send_message(response) async", "= await self.register_issuer(**request_kwargs) await stream.send_message(response) async def __rpc_register_verifier(self, stream: grpclib.server.Stream) -> None: request", "governance_framework: \"GovernanceFramework\" ) -> \"AddFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def remove_framework( self, governance_framework: \"GovernanceFramework\"", "= \"\", credential_type_uri: str = \"\", governance_framework_uri: str = \"\", ) -> \"UnregisterIssuerResponse\":", "valid_until_utc: int = betterproto.uint64_field(12) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class RegisterVerifierResponse(betterproto.Message): status:", "await stream.recv_message() request_kwargs = { \"governance_framework_uri\": request.governance_framework_uri, \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri,", "presentation_type_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\", request, CheckVerifierStatusResponse, ) async def fetch_data( self, *,", "<reponame>musaib072/sdk # Generated by the protocol buffer compiler. DO NOT EDIT! 
# sources:", "betterproto.string_field(20) @dataclass(eq=False, repr=False) class UnregisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class UnregisterVerifierRequest(betterproto.Message):", "= valid_until_utc request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\", request, RegisterIssuerResponse, ) async", "self._call_rpc_handler_server_stream( self.fetch_data, stream, request_kwargs, ) def __mapping__(self) -> Dict[str, grpclib.const.Handler]: return { \"/services.trustregistry.v1.TrustRegistry/AddFramework\":", "EDIT! # sources: services/trust-registry/v1/trust-registry.proto # plugin: python-betterproto from dataclasses import dataclass from typing", ") async def search_registry( self, *, query: str = \"\", continuation_token: str =", "\"__common_v1__.RequestOptions\", ) -> \"SearchRegistryResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_issuer( self, did_uri: str, x509_cert:", "\"UnregisterIssuerResponse\": request = UnregisterIssuerRequest() if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert =", "request = await stream.recv_message() request_kwargs = { \"governance_framework\": request.governance_framework, } response = await", "self, governance_framework: \"GovernanceFramework\" ) -> \"RemoveFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def search_registry( self, query:", "x509_cert: request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\", request, CheckIssuerStatusResponse,", "await stream.recv_message() request_kwargs = { \"governance_framework\": request.governance_framework, } response = await self.remove_framework(**request_kwargs) await", 
"request.governance_framework, } response = await self.remove_framework(**request_kwargs) await stream.send_message(response) async def __rpc_search_registry(self, stream: grpclib.server.Stream)", "= did_uri if x509_cert: request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri return await self._unary_unary(", "@dataclass(eq=False, repr=False) class UnregisterVerifierRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2,", "\"governance_framework_uri\": request.governance_framework_uri, \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, } response = await self.check_issuer_status(**request_kwargs)", "str, presentation_type_uri: str, governance_framework_uri: str, ) -> \"UnregisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def check_issuer_status(", "request, UnregisterIssuerResponse, ) async def unregister_verifier( self, *, did_uri: str = \"\", x509_cert:", "-> AsyncIterator[\"FetchDataResponse\"]: request = FetchDataRequest() request.governance_framework_uri = governance_framework_uri request.query = query async for", "= did_uri if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri request.governance_framework_uri = governance_framework_uri", "= AddFrameworkRequest() if governance_framework is not None: request.governance_framework = governance_framework return await self._unary_unary(", "x509_cert: str = \"\", presentation_type_uri: str = \"\", governance_framework_uri: str = \"\", )", "RegisterIssuerResponse, ) async def register_verifier( self, *, did_uri: str = \"\", x509_cert: str", "in self._unary_stream( \"/services.trustregistry.v1.TrustRegistry/FetchData\", request, FetchDataResponse, ): yield response class TrustRegistryBase(ServiceBase): async def add_framework(", "@dataclass(eq=False, 
repr=False) class SearchRegistryRequest(betterproto.Message): query: str = betterproto.string_field(1) continuation_token: str = betterproto.string_field(2) options:", "str, did_uri: str, x509_cert: str, presentation_type_uri: str, ) -> \"CheckVerifierStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async", "*, governance_framework: \"GovernanceFramework\" = None ) -> \"AddFrameworkResponse\": request = AddFrameworkRequest() if governance_framework", "str = \"\", x509_cert: str = \"\", presentation_type_uri: str = \"\", valid_from_utc: int", "TrustRegistryStub(betterproto.ServiceStub): async def add_framework( self, *, governance_framework: \"GovernanceFramework\" = None ) -> \"AddFrameworkResponse\":", "request = UnregisterIssuerRequest() if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert", "request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, } response = await self.check_verifier_status(**request_kwargs) await stream.send_message(response) async def __rpc_fetch_data(self,", "raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def remove_framework( self, governance_framework: \"GovernanceFramework\" ) -> \"RemoveFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)", "__rpc_fetch_data(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = { \"governance_framework_uri\":", "= await stream.recv_message() request_kwargs = { \"governance_framework\": request.governance_framework, } response = await self.add_framework(**request_kwargs)", "presentation_type_uri: str, governance_framework_uri: str, ) -> \"UnregisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def check_issuer_status( self,", "\"presentation_type_uri\": request.presentation_type_uri, \"governance_framework_uri\": request.governance_framework_uri, } response = await 
self.unregister_verifier(**request_kwargs) await stream.send_message(response) async def", "request.query = query async for response in self._unary_stream( \"/services.trustregistry.v1.TrustRegistry/FetchData\", request, FetchDataResponse, ): yield", "plugin: python-betterproto from dataclasses import dataclass from typing import AsyncIterator, Dict import betterproto", "valid_until_utc: int = 0, governance_framework_uri: str = \"\", ) -> \"RegisterVerifierResponse\": request =", "-> AsyncIterator[\"FetchDataResponse\"]: raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def __rpc_add_framework(self, stream: grpclib.server.Stream) -> None: request =", "class RegisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class UnregisterIssuerRequest(betterproto.Message): did_uri: str =", "x509_cert: str = betterproto.string_field(2, group=\"authority\") presentation_type_uri: str = betterproto.string_field(10) valid_from_utc: int = betterproto.uint64_field(11)", "governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\", request, RegisterVerifierResponse, ) async def unregister_issuer( self, *,", "await stream.send_message(response) async def __rpc_register_issuer(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message()", "RegisterVerifierResponse, ) async def unregister_issuer( self, *, did_uri: str = \"\", x509_cert: str", "= { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, \"governance_framework_uri\": request.governance_framework_uri, } response =", "await stream.recv_message() request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, \"governance_framework_uri\": request.governance_framework_uri,", 
"\"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\": grpclib.const.Handler( self.__rpc_check_verifier_status, grpclib.const.Cardinality.UNARY_UNARY, CheckVerifierStatusRequest, CheckVerifierStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/FetchData\": grpclib.const.Handler( self.__rpc_fetch_data, grpclib.const.Cardinality.UNARY_STREAM, FetchDataRequest, FetchDataResponse,", "= { \"governance_framework_uri\": request.governance_framework_uri, \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, } response =", "def __rpc_check_verifier_status(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = {", "{ \"query\": request.query, \"continuation_token\": request.continuation_token, \"options\": request.options, } response = await self.search_registry(**request_kwargs) await", "EXPIRED = 1 TERMINATED = 2 REVOKED = 3 NOT_FOUND = 10 @dataclass(eq=False,", "str = \"\", credential_type_uri: str = \"\", governance_framework_uri: str = \"\", ) ->", "await stream.recv_message() request_kwargs = { \"query\": request.query, \"continuation_token\": request.continuation_token, \"options\": request.options, } response", "\"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\", request, RegisterVerifierResponse, ) async def unregister_issuer( self, *, did_uri: str = \"\",", "betterproto.uint64_field(11) valid_until_utc: int = betterproto.uint64_field(12) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class RegisterVerifierResponse(betterproto.Message):", "await stream.recv_message() request_kwargs = { \"governance_framework_uri\": request.governance_framework_uri, \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri,", "compiler. DO NOT EDIT! 
# sources: services/trust-registry/v1/trust-registry.proto # plugin: python-betterproto from dataclasses import", ") -> \"AddFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def remove_framework( self, governance_framework: \"GovernanceFramework\" ) ->", "valid_until_utc: int, governance_framework_uri: str, ) -> \"RegisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def unregister_issuer( self,", "= await stream.recv_message() request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"valid_from_utc\":", "betterproto.string_field(1) continuation_token: str = betterproto.string_field(2) options: \"__common_v1__.RequestOptions\" = betterproto.message_field(5) @dataclass(eq=False, repr=False) class SearchRegistryResponse(betterproto.Message):", "self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\", request, UnregisterIssuerResponse, ) async def unregister_verifier( self, *, did_uri: str =", "request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\": request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, } response = await", "request_kwargs, ) def __mapping__(self) -> Dict[str, grpclib.const.Handler]: return { \"/services.trustregistry.v1.TrustRegistry/AddFramework\": grpclib.const.Handler( self.__rpc_add_framework, grpclib.const.Cardinality.UNARY_UNARY,", "str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class RegisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False)", "did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri return", "int = betterproto.uint64_field(11) valid_until_utc: 
int = betterproto.uint64_field(12) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False)", ") -> \"CheckVerifierStatusResponse\": request = CheckVerifierStatusRequest() request.governance_framework_uri = governance_framework_uri if did_uri: request.did_uri =", "= \"\" ) -> AsyncIterator[\"FetchDataResponse\"]: request = FetchDataRequest() request.governance_framework_uri = governance_framework_uri request.query =", "governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class UnregisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False,", "self.add_framework(**request_kwargs) await stream.send_message(response) async def __rpc_remove_framework(self, stream: grpclib.server.Stream) -> None: request = await", "return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/AddFramework\", request, AddFrameworkResponse, ) async def remove_framework( self, *, governance_framework:", ") -> \"UnregisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def check_issuer_status( self, governance_framework_uri: str, did_uri: str,", "request.governance_framework_uri = governance_framework_uri request.query = query async for response in self._unary_stream( \"/services.trustregistry.v1.TrustRegistry/FetchData\", request,", "\"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, } response = await self.check_verifier_status(**request_kwargs) await stream.send_message(response)", ") -> \"UnregisterIssuerResponse\": request = UnregisterIssuerRequest() if did_uri: request.did_uri = did_uri if x509_cert:", "str = \"\", presentation_type_uri: str = \"\", governance_framework_uri: str = \"\", ) ->", "request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, } response = await 
self.register_issuer(**request_kwargs) await stream.send_message(response) async def __rpc_register_verifier(self,", "= valid_from_utc request.valid_until_utc = valid_until_utc request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\", request,", "CURRENT = 0 EXPIRED = 1 TERMINATED = 2 REVOKED = 3 NOT_FOUND", "repr=False) class FetchDataRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) query: str = betterproto.string_field(2) @dataclass(eq=False, repr=False)", "None ) -> \"AddFrameworkResponse\": request = AddFrameworkRequest() if governance_framework is not None: request.governance_framework", "def __rpc_register_verifier(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = {", "repr=False) class UnregisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class CheckIssuerStatusRequest(betterproto.Message): governance_framework_uri: str", "x509_cert: request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri request.governance_framework_uri = governance_framework_uri return await self._unary_unary(", "def remove_framework( self, governance_framework: \"GovernanceFramework\" ) -> \"RemoveFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def search_registry(", "x509_cert: str, credential_type_uri: str, governance_framework_uri: str, ) -> \"UnregisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def", "did_uri: str = betterproto.string_field(2, group=\"member\") x509_cert: str = betterproto.string_field(3, group=\"member\") presentation_type_uri: str =", "add_framework( self, governance_framework: \"GovernanceFramework\" ) -> \"AddFrameworkResponse\": raise 
grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def remove_framework( self,", "0, governance_framework_uri: str = \"\", ) -> \"RegisterVerifierResponse\": request = RegisterVerifierRequest() if did_uri:", "RegistrationStatus(betterproto.Enum): CURRENT = 0 EXPIRED = 1 TERMINATED = 2 REVOKED = 3", "grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def __rpc_add_framework(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs", "from dataclasses import dataclass from typing import AsyncIterator, Dict import betterproto from betterproto.grpc.grpclib_server", "str = betterproto.string_field(2) description: str = betterproto.string_field(3) @dataclass(eq=False, repr=False) class RegisterIssuerRequest(betterproto.Message): did_uri: str", "\"\", ) -> \"UnregisterVerifierResponse\": request = UnregisterVerifierRequest() if did_uri: request.did_uri = did_uri if", "= betterproto.string_field(2, group=\"authority\") credential_type_uri: str = betterproto.string_field(10) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False)", "str = \"\", credential_type_uri: str = \"\", ) -> \"CheckIssuerStatusResponse\": request = CheckIssuerStatusRequest()", "raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def check_issuer_status( self, governance_framework_uri: str, did_uri: str, x509_cert: str, credential_type_uri:", "self, *, did_uri: str = \"\", x509_cert: str = \"\", credential_type_uri: str =", "governance_framework_uri: str, ) -> \"UnregisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def unregister_verifier( self, did_uri: str,", "class AddFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False, repr=False) class AddFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" =", "status: \"RegistrationStatus\" = 
betterproto.enum_field(1) @dataclass(eq=False, repr=False) class FetchDataRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) query:", "presentation_type_uri: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class CheckVerifierStatusResponse(betterproto.Message): status: \"RegistrationStatus\" = betterproto.enum_field(1) @dataclass(eq=False,", "def __rpc_check_issuer_status(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = {", "\"governance_framework_uri\": request.governance_framework_uri, } response = await self.register_issuer(**request_kwargs) await stream.send_message(response) async def __rpc_register_verifier(self, stream:", "CheckIssuerStatusResponse, ) async def check_verifier_status( self, *, governance_framework_uri: str = \"\", did_uri: str", "} await self._call_rpc_handler_server_stream( self.fetch_data, stream, request_kwargs, ) def __mapping__(self) -> Dict[str, grpclib.const.Handler]: return", "betterproto.message_field(1) @dataclass(eq=False, repr=False) class AddFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class RemoveFrameworkRequest(betterproto.Message):", "= governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\", request, UnregisterIssuerResponse, ) async def unregister_verifier( self,", "presentation_type_uri: str, valid_from_utc: int, valid_until_utc: int, governance_framework_uri: str, ) -> \"RegisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)", "group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") presentation_type_uri: str = betterproto.string_field(10) governance_framework_uri: str =", "class FetchDataResponse(betterproto.Message): response_json: str = betterproto.string_field(1) has_more_results: bool = 
betterproto.bool_field(2) continuation_token: str =", "await self.remove_framework(**request_kwargs) await stream.send_message(response) async def __rpc_search_registry(self, stream: grpclib.server.Stream) -> None: request =", "async def __rpc_search_registry(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs =", "request.governance_framework_uri, \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, } response = await self.check_issuer_status(**request_kwargs) await", "governance_framework: \"GovernanceFramework\" = None ) -> \"AddFrameworkResponse\": request = AddFrameworkRequest() if governance_framework is", "\"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\": grpclib.const.Handler( self.__rpc_unregister_verifier, grpclib.const.Cardinality.UNARY_UNARY, UnregisterVerifierRequest, UnregisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\": grpclib.const.Handler( self.__rpc_check_issuer_status, grpclib.const.Cardinality.UNARY_UNARY, CheckIssuerStatusRequest, CheckIssuerStatusResponse,", "if x509_cert: request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri request.valid_from_utc = valid_from_utc request.valid_until_utc =", "self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\", request, UnregisterVerifierResponse, ) async def check_issuer_status( self, *, governance_framework_uri: str =", "grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = { \"governance_framework_uri\": request.governance_framework_uri, \"did_uri\":", ") -> \"CheckIssuerStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def check_verifier_status( self, governance_framework_uri: str, did_uri: str,", "= betterproto.enum_field(1) @dataclass(eq=False, repr=False) class UnregisterIssuerRequest(betterproto.Message): did_uri: 
str = betterproto.string_field(1, group=\"authority\") x509_cert: str", "betterproto.uint64_field(12) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class RegisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1)", "self.check_verifier_status(**request_kwargs) await stream.send_message(response) async def __rpc_fetch_data(self, stream: grpclib.server.Stream) -> None: request = await", "async for response in self._unary_stream( \"/services.trustregistry.v1.TrustRegistry/FetchData\", request, FetchDataResponse, ): yield response class TrustRegistryBase(ServiceBase):", "UnregisterIssuerRequest, UnregisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\": grpclib.const.Handler( self.__rpc_unregister_verifier, grpclib.const.Cardinality.UNARY_UNARY, UnregisterVerifierRequest, UnregisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\": grpclib.const.Handler( self.__rpc_check_issuer_status,", "self.__rpc_add_framework, grpclib.const.Cardinality.UNARY_UNARY, AddFrameworkRequest, AddFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\": grpclib.const.Handler( self.__rpc_remove_framework, grpclib.const.Cardinality.UNARY_UNARY, RemoveFrameworkRequest, RemoveFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\":", "= { \"governance_framework\": request.governance_framework, } response = await self.remove_framework(**request_kwargs) await stream.send_message(response) async def", "request.options, } response = await self.search_registry(**request_kwargs) await stream.send_message(response) async def __rpc_register_issuer(self, stream: grpclib.server.Stream)", "dataclass from typing import AsyncIterator, Dict import betterproto from betterproto.grpc.grpclib_server import ServiceBase import", "= credential_type_uri request.governance_framework_uri = 
governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\", request, UnregisterIssuerResponse, ) async", "def check_verifier_status( self, governance_framework_uri: str, did_uri: str, x509_cert: str, presentation_type_uri: str, ) ->", "repr=False) class SearchRegistryResponse(betterproto.Message): items_json: str = betterproto.string_field(1) has_more: bool = betterproto.bool_field(2) count: int", "= betterproto.uint64_field(12) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class RegisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" =", "self.search_registry(**request_kwargs) await stream.send_message(response) async def __rpc_register_issuer(self, stream: grpclib.server.Stream) -> None: request = await", "async def unregister_verifier( self, did_uri: str, x509_cert: str, presentation_type_uri: str, governance_framework_uri: str, )", "await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\", request, RegisterIssuerResponse, ) async def register_verifier( self, *, did_uri: str", "str, x509_cert: str, credential_type_uri: str, governance_framework_uri: str, ) -> \"UnregisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async", "-> \"CheckIssuerStatusResponse\": request = CheckIssuerStatusRequest() request.governance_framework_uri = governance_framework_uri if did_uri: request.did_uri = did_uri", "AddFrameworkRequest, AddFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\": grpclib.const.Handler( self.__rpc_remove_framework, grpclib.const.Cardinality.UNARY_UNARY, RemoveFrameworkRequest, RemoveFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\": grpclib.const.Handler( self.__rpc_search_registry,", "return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\", request, 
CheckIssuerStatusResponse, ) async def check_verifier_status( self, *, governance_framework_uri:", "request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri request.valid_from_utc =", "def add_framework( self, governance_framework: \"GovernanceFramework\" ) -> \"AddFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def remove_framework(", "import betterproto from betterproto.grpc.grpclib_server import ServiceBase import grpclib class RegistrationStatus(betterproto.Enum): CURRENT = 0", "@dataclass(eq=False, repr=False) class FetchDataResponse(betterproto.Message): response_json: str = betterproto.string_field(1) has_more_results: bool = betterproto.bool_field(2) continuation_token:", "= \"\", x509_cert: str = \"\", presentation_type_uri: str = \"\", governance_framework_uri: str =", "self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\", request, RegisterIssuerResponse, ) async def register_verifier( self, *, did_uri: str =", "@dataclass(eq=False, repr=False) class RegisterVerifierRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2,", "RemoveFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class SearchRegistryRequest(betterproto.Message): query: str = betterproto.string_field(1)", "self.register_issuer(**request_kwargs) await stream.send_message(response) async def __rpc_register_verifier(self, stream: grpclib.server.Stream) -> None: request = await", "governance_framework is not None: request.governance_framework = governance_framework return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\", request, RemoveFrameworkResponse,", "if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert 
request.credential_type_uri = credential_type_uri", "\"governance_framework_uri\": request.governance_framework_uri, } response = await self.unregister_verifier(**request_kwargs) await stream.send_message(response) async def __rpc_check_issuer_status(self, stream:", "the protocol buffer compiler. DO NOT EDIT! # sources: services/trust-registry/v1/trust-registry.proto # plugin: python-betterproto", "\"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\", request, CheckIssuerStatusResponse, ) async def check_verifier_status( self, *, governance_framework_uri: str = \"\",", "\"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\": grpclib.const.Handler( self.__rpc_register_issuer, grpclib.const.Cardinality.UNARY_UNARY, RegisterIssuerRequest, RegisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\": grpclib.const.Handler( self.__rpc_register_verifier, grpclib.const.Cardinality.UNARY_UNARY, RegisterVerifierRequest, RegisterVerifierResponse,", "\"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class RegisterVerifierRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert:", "valid_from_utc: int = 0, valid_until_utc: int = 0, governance_framework_uri: str = \"\", )", "SearchRegistryResponse, ) async def register_issuer( self, *, did_uri: str = \"\", x509_cert: str", "credential_type_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\", request, CheckIssuerStatusResponse, ) async def check_verifier_status( self, *,", "@dataclass(eq=False, repr=False) class CheckVerifierStatusRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) did_uri: str = betterproto.string_field(2, group=\"member\")", "sources: services/trust-registry/v1/trust-registry.proto # plugin: python-betterproto from dataclasses import dataclass from typing import AsyncIterator,", "= 
governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\", request, RegisterIssuerResponse, ) async def register_verifier( self,", "\"continuation_token\": request.continuation_token, \"options\": request.options, } response = await self.search_registry(**request_kwargs) await stream.send_message(response) async def", "did_uri: str = \"\", x509_cert: str = \"\", presentation_type_uri: str = \"\", governance_framework_uri:", "\"/services.trustregistry.v1.TrustRegistry/RemoveFramework\", request, RemoveFrameworkResponse, ) async def search_registry( self, *, query: str = \"\",", "betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") presentation_type_uri: str = betterproto.string_field(10) governance_framework_uri: str", "group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") credential_type_uri: str = betterproto.string_field(10) governance_framework_uri: str =", "async def check_issuer_status( self, governance_framework_uri: str, did_uri: str, x509_cert: str, credential_type_uri: str, )", "request = FetchDataRequest() request.governance_framework_uri = governance_framework_uri request.query = query async for response in", "self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/AddFramework\", request, AddFrameworkResponse, ) async def remove_framework( self, *, governance_framework: \"GovernanceFramework\" =", "self, *, governance_framework_uri: str = \"\", did_uri: str = \"\", x509_cert: str =", "betterproto.string_field(2) description: str = betterproto.string_field(3) @dataclass(eq=False, repr=False) class RegisterIssuerRequest(betterproto.Message): did_uri: str = betterproto.string_field(1,", "x509_cert request.presentation_type_uri = presentation_type_uri request.governance_framework_uri = governance_framework_uri return await self._unary_unary( 
\"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\", request, UnregisterVerifierResponse,", "= await self.unregister_issuer(**request_kwargs) await stream.send_message(response) async def __rpc_unregister_verifier(self, stream: grpclib.server.Stream) -> None: request", "str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class RegisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False)", "self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\", request, RegisterVerifierResponse, ) async def unregister_issuer( self, *, did_uri: str =", "betterproto.uint64_field(11) valid_until_utc: int = betterproto.uint64_field(12) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class RegisterIssuerResponse(betterproto.Message):", "class UnregisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class UnregisterVerifierRequest(betterproto.Message): did_uri: str =", "= { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\": request.valid_until_utc, \"governance_framework_uri\":", "# Generated by the protocol buffer compiler. DO NOT EDIT! 
# sources: services/trust-registry/v1/trust-registry.proto", "-> None: request = await stream.recv_message() request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert,", "add_framework( self, *, governance_framework: \"GovernanceFramework\" = None ) -> \"AddFrameworkResponse\": request = AddFrameworkRequest()", "= await stream.recv_message() request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, \"valid_from_utc\":", "request = await stream.recv_message() request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri,", ") -> \"RemoveFrameworkResponse\": request = RemoveFrameworkRequest() if governance_framework is not None: request.governance_framework =", "= betterproto.string_field(1) trust_registry_uri: str = betterproto.string_field(2) description: str = betterproto.string_field(3) @dataclass(eq=False, repr=False) class", ") -> \"RegisterVerifierResponse\": request = RegisterVerifierRequest() if did_uri: request.did_uri = did_uri if x509_cert:", "self, *, did_uri: str = \"\", x509_cert: str = \"\", presentation_type_uri: str =", "{ \"governance_framework_uri\": request.governance_framework_uri, \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, } response = await", "= \"\", presentation_type_uri: str = \"\", ) -> \"CheckVerifierStatusResponse\": request = CheckVerifierStatusRequest() request.governance_framework_uri", "async def add_framework( self, governance_framework: \"GovernanceFramework\" ) -> \"AddFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def", "\"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\", request, UnregisterIssuerResponse, ) async def unregister_verifier( self, *, did_uri: str = \"\",", "unregister_issuer( self, *, did_uri: str = \"\", 
x509_cert: str = \"\", credential_type_uri: str", "self, did_uri: str, x509_cert: str, credential_type_uri: str, governance_framework_uri: str, ) -> \"UnregisterIssuerResponse\": raise", "group=\"authority\") credential_type_uri: str = betterproto.string_field(10) valid_from_utc: int = betterproto.uint64_field(11) valid_until_utc: int = betterproto.uint64_field(12)", "stream.send_message(response) async def __rpc_unregister_issuer(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs", "if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri request.valid_from_utc = valid_from_utc request.valid_until_utc =", "= await stream.recv_message() request_kwargs = { \"query\": request.query, \"continuation_token\": request.continuation_token, \"options\": request.options, }", "= None, ) -> \"SearchRegistryResponse\": request = SearchRegistryRequest() request.query = query request.continuation_token =", "str = betterproto.string_field(2, group=\"authority\") credential_type_uri: str = betterproto.string_field(10) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False,", "import AsyncIterator, Dict import betterproto from betterproto.grpc.grpclib_server import ServiceBase import grpclib class RegistrationStatus(betterproto.Enum):", "betterproto.message_field(1) @dataclass(eq=False, repr=False) class RemoveFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class SearchRegistryRequest(betterproto.Message):", "def check_verifier_status( self, *, governance_framework_uri: str = \"\", did_uri: str = \"\", x509_cert:", "= betterproto.string_field(20) @dataclass(eq=False, repr=False) class UnregisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class", "str, x509_cert: str, 
presentation_type_uri: str, valid_from_utc: int, valid_until_utc: int, governance_framework_uri: str, ) ->", "betterproto.enum_field(1) @dataclass(eq=False, repr=False) class RemoveFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False, repr=False) class RemoveFrameworkResponse(betterproto.Message):", "async def search_registry( self, query: str, continuation_token: str, options: \"__common_v1__.RequestOptions\", ) -> \"SearchRegistryResponse\":", "grpclib class RegistrationStatus(betterproto.Enum): CURRENT = 0 EXPIRED = 1 TERMINATED = 2 REVOKED", "register_issuer( self, did_uri: str, x509_cert: str, credential_type_uri: str, valid_from_utc: int, valid_until_utc: int, governance_framework_uri:", "betterproto.string_field(10) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class UnregisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1)", "str = \"\", ) -> \"RegisterVerifierResponse\": request = RegisterVerifierRequest() if did_uri: request.did_uri =", "\"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False, repr=False) class AddFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False)", "UnregisterVerifierRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") presentation_type_uri: str", "@dataclass(eq=False, repr=False) class AddFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False, repr=False) class AddFrameworkResponse(betterproto.Message): status:", "= await stream.recv_message() request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": 
request.presentation_type_uri, \"governance_framework_uri\":", "\"\", ) -> \"UnregisterIssuerResponse\": request = UnregisterIssuerRequest() if did_uri: request.did_uri = did_uri if", "request.governance_framework_uri, \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, } response = await self.check_verifier_status(**request_kwargs) await", "\"UnregisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def check_issuer_status( self, governance_framework_uri: str, did_uri: str, x509_cert: str,", "request.governance_framework, } response = await self.add_framework(**request_kwargs) await stream.send_message(response) async def __rpc_remove_framework(self, stream: grpclib.server.Stream)", "RemoveFrameworkRequest, RemoveFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\": grpclib.const.Handler( self.__rpc_search_registry, grpclib.const.Cardinality.UNARY_UNARY, SearchRegistryRequest, SearchRegistryResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\": grpclib.const.Handler( self.__rpc_register_issuer,", "yield response class TrustRegistryBase(ServiceBase): async def add_framework( self, governance_framework: \"GovernanceFramework\" ) -> \"AddFrameworkResponse\":", "= betterproto.string_field(2) description: str = betterproto.string_field(3) @dataclass(eq=False, repr=False) class RegisterIssuerRequest(betterproto.Message): did_uri: str =", "__rpc_unregister_verifier(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = { \"did_uri\":", "SearchRegistryRequest() request.query = query request.continuation_token = continuation_token if options is not None: request.options", "str = \"\", ) -> \"RegisterIssuerResponse\": request = RegisterIssuerRequest() if did_uri: request.did_uri =", "x509_cert: str, presentation_type_uri: str, ) -> \"CheckVerifierStatusResponse\": raise 
grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def fetch_data( self,", "response in self._unary_stream( \"/services.trustregistry.v1.TrustRegistry/FetchData\", request, FetchDataResponse, ): yield response class TrustRegistryBase(ServiceBase): async def", "request.credential_type_uri = credential_type_uri request.valid_from_utc = valid_from_utc request.valid_until_utc = valid_until_utc request.governance_framework_uri = governance_framework_uri return", "= await self.register_verifier(**request_kwargs) await stream.send_message(response) async def __rpc_unregister_issuer(self, stream: grpclib.server.Stream) -> None: request", "= betterproto.string_field(1) continuation_token: str = betterproto.string_field(2) options: \"__common_v1__.RequestOptions\" = betterproto.message_field(5) @dataclass(eq=False, repr=False) class", "class CheckIssuerStatusResponse(betterproto.Message): status: \"RegistrationStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class CheckVerifierStatusRequest(betterproto.Message): governance_framework_uri: str =", "CheckIssuerStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\": grpclib.const.Handler( self.__rpc_check_verifier_status, grpclib.const.Cardinality.UNARY_UNARY, CheckVerifierStatusRequest, CheckVerifierStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/FetchData\": grpclib.const.Handler( self.__rpc_fetch_data, grpclib.const.Cardinality.UNARY_STREAM,", "TrustRegistryBase(ServiceBase): async def add_framework( self, governance_framework: \"GovernanceFramework\" ) -> \"AddFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async", "governance_framework_uri request.query = query async for response in self._unary_stream( \"/services.trustregistry.v1.TrustRegistry/FetchData\", request, FetchDataResponse, ):", "valid_from_utc request.valid_until_utc = valid_until_utc request.governance_framework_uri = governance_framework_uri return await 
self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\", request, RegisterIssuerResponse,", "), \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\": grpclib.const.Handler( self.__rpc_register_issuer, grpclib.const.Cardinality.UNARY_UNARY, RegisterIssuerRequest, RegisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\": grpclib.const.Handler( self.__rpc_register_verifier, grpclib.const.Cardinality.UNARY_UNARY, RegisterVerifierRequest,", "str, credential_type_uri: str, governance_framework_uri: str, ) -> \"UnregisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def unregister_verifier(", "x509_cert: str = \"\", presentation_type_uri: str = \"\", ) -> \"CheckVerifierStatusResponse\": request =", "request.query = query request.continuation_token = continuation_token if options is not None: request.options =", "= betterproto.enum_field(1) @dataclass(eq=False, repr=False) class RegisterVerifierRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str", "str = betterproto.string_field(1) query: str = betterproto.string_field(2) @dataclass(eq=False, repr=False) class FetchDataResponse(betterproto.Message): response_json: str", "= { \"governance_framework_uri\": request.governance_framework_uri, \"query\": request.query, } await self._call_rpc_handler_server_stream( self.fetch_data, stream, request_kwargs, )", "str = betterproto.string_field(2, group=\"authority\") presentation_type_uri: str = betterproto.string_field(10) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False,", "governance_framework_uri: str = \"\", ) -> \"UnregisterVerifierResponse\": request = UnregisterVerifierRequest() if did_uri: request.did_uri", "await self.check_verifier_status(**request_kwargs) await stream.send_message(response) async def __rpc_fetch_data(self, stream: grpclib.server.Stream) -> None: request =", 
"stream.recv_message() request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"governance_framework_uri\": request.governance_framework_uri, }", "UnregisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\": grpclib.const.Handler( self.__rpc_check_issuer_status, grpclib.const.Cardinality.UNARY_UNARY, CheckIssuerStatusRequest, CheckIssuerStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\": grpclib.const.Handler( self.__rpc_check_verifier_status, grpclib.const.Cardinality.UNARY_UNARY,", "str = \"\", did_uri: str = \"\", x509_cert: str = \"\", credential_type_uri: str", "class SearchRegistryResponse(betterproto.Message): items_json: str = betterproto.string_field(1) has_more: bool = betterproto.bool_field(2) count: int =", "import dataclass from typing import AsyncIterator, Dict import betterproto from betterproto.grpc.grpclib_server import ServiceBase", "= \"\", credential_type_uri: str = \"\", valid_from_utc: int = 0, valid_until_utc: int =", "@dataclass(eq=False, repr=False) class GovernanceFramework(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) trust_registry_uri: str = betterproto.string_field(2) description:", "betterproto.string_field(1) has_more_results: bool = betterproto.bool_field(2) continuation_token: str = betterproto.string_field(3) class TrustRegistryStub(betterproto.ServiceStub): async def", "unregister_verifier( self, did_uri: str, x509_cert: str, presentation_type_uri: str, governance_framework_uri: str, ) -> \"UnregisterVerifierResponse\":", "self, governance_framework_uri: str, did_uri: str, x509_cert: str, presentation_type_uri: str, ) -> \"CheckVerifierStatusResponse\": raise", "async def search_registry( self, *, query: str = \"\", continuation_token: str = \"\",", "def __rpc_register_issuer(self, stream: grpclib.server.Stream) -> None: request = await 
stream.recv_message() request_kwargs = {", "Dict import betterproto from betterproto.grpc.grpclib_server import ServiceBase import grpclib class RegistrationStatus(betterproto.Enum): CURRENT =", "class RegisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class RegisterVerifierRequest(betterproto.Message): did_uri: str =", "= betterproto.string_field(3, group=\"member\") presentation_type_uri: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class CheckVerifierStatusResponse(betterproto.Message): status: \"RegistrationStatus\"", "repr=False) class GovernanceFramework(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) trust_registry_uri: str = betterproto.string_field(2) description: str", "request = RegisterIssuerRequest() if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert", "check_verifier_status( self, governance_framework_uri: str, did_uri: str, x509_cert: str, presentation_type_uri: str, ) -> \"CheckVerifierStatusResponse\":", "None ) -> \"RemoveFrameworkResponse\": request = RemoveFrameworkRequest() if governance_framework is not None: request.governance_framework", "credential_type_uri: str = \"\", ) -> \"CheckIssuerStatusResponse\": request = CheckIssuerStatusRequest() request.governance_framework_uri = governance_framework_uri", "= presentation_type_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\", request, CheckVerifierStatusResponse, ) async def fetch_data( self,", "request = RemoveFrameworkRequest() if governance_framework is not None: request.governance_framework = governance_framework return await", "if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri", "request.presentation_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\": 
request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.register_verifier(**request_kwargs) await", "-> \"UnregisterIssuerResponse\": request = UnregisterIssuerRequest() if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert", "= \"\", ) -> \"RegisterIssuerResponse\": request = RegisterIssuerRequest() if did_uri: request.did_uri = did_uri", "request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.unregister_verifier(**request_kwargs) await", "stream.send_message(response) async def __rpc_remove_framework(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs", "async def remove_framework( self, *, governance_framework: \"GovernanceFramework\" = None ) -> \"RemoveFrameworkResponse\": request", "= governance_framework return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\", request, RemoveFrameworkResponse, ) async def search_registry( self,", "= { \"governance_framework\": request.governance_framework, } response = await self.add_framework(**request_kwargs) await stream.send_message(response) async def", "request = RegisterVerifierRequest() if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert", "repr=False) class UnregisterVerifierRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\")", "response = await self.remove_framework(**request_kwargs) await stream.send_message(response) async def __rpc_search_registry(self, stream: grpclib.server.Stream) -> None:", "request = UnregisterVerifierRequest() if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert", "= await self.add_framework(**request_kwargs) await 
stream.send_message(response) async def __rpc_remove_framework(self, stream: grpclib.server.Stream) -> None: request", "request, AddFrameworkResponse, ) async def remove_framework( self, *, governance_framework: \"GovernanceFramework\" = None )", "str = \"\", continuation_token: str = \"\", options: \"__common_v1__.RequestOptions\" = None, ) ->", "grpclib.const.Cardinality.UNARY_UNARY, RegisterIssuerRequest, RegisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\": grpclib.const.Handler( self.__rpc_register_verifier, grpclib.const.Cardinality.UNARY_UNARY, RegisterVerifierRequest, RegisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\": grpclib.const.Handler(", "governance_framework_uri: str = \"\", ) -> \"UnregisterIssuerResponse\": request = UnregisterIssuerRequest() if did_uri: request.did_uri", "= betterproto.bool_field(2) count: int = betterproto.int32_field(3) continuation_token: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class", "\"\", valid_from_utc: int = 0, valid_until_utc: int = 0, governance_framework_uri: str = \"\",", "= \"\", presentation_type_uri: str = \"\", governance_framework_uri: str = \"\", ) -> \"UnregisterVerifierResponse\":", "self.check_issuer_status(**request_kwargs) await stream.send_message(response) async def __rpc_check_verifier_status(self, stream: grpclib.server.Stream) -> None: request = await", "str = betterproto.string_field(2) @dataclass(eq=False, repr=False) class FetchDataResponse(betterproto.Message): response_json: str = betterproto.string_field(1) has_more_results: bool", "async def __rpc_check_verifier_status(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs =", "governance_framework: \"GovernanceFramework\" ) -> \"RemoveFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def search_registry( self, query: str,", "request_kwargs = { 
\"governance_framework_uri\": request.governance_framework_uri, \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, } response", "repr=False) class UnregisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class UnregisterVerifierRequest(betterproto.Message): did_uri: str", "\"RegisterVerifierResponse\": request = RegisterVerifierRequest() if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert =", "str = \"\", credential_type_uri: str = \"\", valid_from_utc: int = 0, valid_until_utc: int", "await self._call_rpc_handler_server_stream( self.fetch_data, stream, request_kwargs, ) def __mapping__(self) -> Dict[str, grpclib.const.Handler]: return {", "str = betterproto.string_field(2, group=\"member\") x509_cert: str = betterproto.string_field(3, group=\"member\") presentation_type_uri: str = betterproto.string_field(4)", "AddFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False, repr=False) class AddFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1)", "def remove_framework( self, *, governance_framework: \"GovernanceFramework\" = None ) -> \"RemoveFrameworkResponse\": request =", "stream.recv_message() request_kwargs = { \"governance_framework\": request.governance_framework, } response = await self.add_framework(**request_kwargs) await stream.send_message(response)", "items_json: str = betterproto.string_field(1) has_more: bool = betterproto.bool_field(2) count: int = betterproto.int32_field(3) continuation_token:", "str = betterproto.string_field(3) @dataclass(eq=False, repr=False) class RegisterIssuerRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert:", "status: \"__common_v1__.ResponseStatus\" = 
betterproto.enum_field(1) @dataclass(eq=False, repr=False) class UnregisterIssuerRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\")", "did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") presentation_type_uri: str =", "grpclib.const.Handler( self.__rpc_check_issuer_status, grpclib.const.Cardinality.UNARY_UNARY, CheckIssuerStatusRequest, CheckIssuerStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\": grpclib.const.Handler( self.__rpc_check_verifier_status, grpclib.const.Cardinality.UNARY_UNARY, CheckVerifierStatusRequest, CheckVerifierStatusResponse, ),", "\"valid_until_utc\": request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.register_verifier(**request_kwargs) await stream.send_message(response) async def", "betterproto.string_field(1) did_uri: str = betterproto.string_field(2, group=\"member\") x509_cert: str = betterproto.string_field(3, group=\"member\") credential_type_uri: str", "= CheckIssuerStatusRequest() request.governance_framework_uri = governance_framework_uri if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert", "governance_framework: \"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False, repr=False) class AddFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False,", "x509_cert request.credential_type_uri = credential_type_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\", request, CheckIssuerStatusResponse, ) async def", "repr=False) class AddFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class RemoveFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\"", "{ 
\"governance_framework\": request.governance_framework, } response = await self.add_framework(**request_kwargs) await stream.send_message(response) async def __rpc_remove_framework(self,", "str = \"\", valid_from_utc: int = 0, valid_until_utc: int = 0, governance_framework_uri: str", "= credential_type_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\", request, CheckIssuerStatusResponse, ) async def check_verifier_status( self,", "= governance_framework return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/AddFramework\", request, AddFrameworkResponse, ) async def remove_framework( self,", "self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\", request, SearchRegistryResponse, ) async def register_issuer( self, *, did_uri: str =", "-> \"CheckIssuerStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def check_verifier_status( self, governance_framework_uri: str, did_uri: str, x509_cert:", "= betterproto.string_field(2, group=\"member\") x509_cert: str = betterproto.string_field(3, group=\"member\") presentation_type_uri: str = betterproto.string_field(4) @dataclass(eq=False,", "x509_cert request.presentation_type_uri = presentation_type_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\", request, CheckVerifierStatusResponse, ) async def", "= betterproto.string_field(1) has_more: bool = betterproto.bool_field(2) count: int = betterproto.int32_field(3) continuation_token: str =", "= RemoveFrameworkRequest() if governance_framework is not None: request.governance_framework = governance_framework return await self._unary_unary(", "self, governance_framework_uri: str, did_uri: str, x509_cert: str, credential_type_uri: str, ) -> \"CheckIssuerStatusResponse\": raise", "self._unary_stream( \"/services.trustregistry.v1.TrustRegistry/FetchData\", request, FetchDataResponse, ): yield response class 
TrustRegistryBase(ServiceBase): async def add_framework( self,", "str, valid_from_utc: int, valid_until_utc: int, governance_framework_uri: str, ) -> \"RegisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async", "RegisterVerifierRequest, RegisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\": grpclib.const.Handler( self.__rpc_unregister_issuer, grpclib.const.Cardinality.UNARY_UNARY, UnregisterIssuerRequest, UnregisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\": grpclib.const.Handler( self.__rpc_unregister_verifier,", "@dataclass(eq=False, repr=False) class RemoveFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class SearchRegistryRequest(betterproto.Message): query:", ") -> \"UnregisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def unregister_verifier( self, did_uri: str, x509_cert: str,", "= await stream.recv_message() request_kwargs = { \"governance_framework_uri\": request.governance_framework_uri, \"query\": request.query, } await self._call_rpc_handler_server_stream(", "} response = await self.check_issuer_status(**request_kwargs) await stream.send_message(response) async def __rpc_check_verifier_status(self, stream: grpclib.server.Stream) ->", "AsyncIterator[\"FetchDataResponse\"]: raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def __rpc_add_framework(self, stream: grpclib.server.Stream) -> None: request = await", "str = betterproto.string_field(3) class TrustRegistryStub(betterproto.ServiceStub): async def add_framework( self, *, governance_framework: \"GovernanceFramework\" =", "str = betterproto.string_field(10) valid_from_utc: int = betterproto.uint64_field(11) valid_until_utc: int = betterproto.uint64_field(12) governance_framework_uri: str", "@dataclass(eq=False, repr=False) class 
UnregisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class CheckIssuerStatusRequest(betterproto.Message): governance_framework_uri:", "{ \"governance_framework_uri\": request.governance_framework_uri, \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, } response = await", "= \"\", x509_cert: str = \"\", presentation_type_uri: str = \"\", valid_from_utc: int =", "repr=False) class UnregisterIssuerRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\")", "await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\", request, UnregisterVerifierResponse, ) async def check_issuer_status( self, *, governance_framework_uri: str", "request_kwargs = { \"governance_framework\": request.governance_framework, } response = await self.add_framework(**request_kwargs) await stream.send_message(response) async", "response = await self.add_framework(**request_kwargs) await stream.send_message(response) async def __rpc_remove_framework(self, stream: grpclib.server.Stream) -> None:", "= await stream.recv_message() request_kwargs = { \"governance_framework_uri\": request.governance_framework_uri, \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\":", "class CheckVerifierStatusRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) did_uri: str = betterproto.string_field(2, group=\"member\") x509_cert: str", "async def unregister_issuer( self, did_uri: str, x509_cert: str, credential_type_uri: str, governance_framework_uri: str, )", "\"\", x509_cert: str = \"\", presentation_type_uri: str = \"\", valid_from_utc: int = 0,", "-> \"RegisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async 
def unregister_issuer( self, did_uri: str, x509_cert: str, credential_type_uri:", "= { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\": request.valid_until_utc, \"governance_framework_uri\":", "str, x509_cert: str, presentation_type_uri: str, ) -> \"CheckVerifierStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def fetch_data(", "grpclib.const.Cardinality.UNARY_UNARY, RegisterVerifierRequest, RegisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\": grpclib.const.Handler( self.__rpc_unregister_issuer, grpclib.const.Cardinality.UNARY_UNARY, UnregisterIssuerRequest, UnregisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\": grpclib.const.Handler(", "ServiceBase import grpclib class RegistrationStatus(betterproto.Enum): CURRENT = 0 EXPIRED = 1 TERMINATED =", "None: request.governance_framework = governance_framework return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/AddFramework\", request, AddFrameworkResponse, ) async def", "= betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") presentation_type_uri: str = betterproto.string_field(10) governance_framework_uri:", "self, *, query: str = \"\", continuation_token: str = \"\", options: \"__common_v1__.RequestOptions\" =", "presentation_type_uri: str = \"\", valid_from_utc: int = 0, valid_until_utc: int = 0, governance_framework_uri:", "{ \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, \"governance_framework_uri\": request.governance_framework_uri, } response = await", "None: request.governance_framework = governance_framework return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\", request, 
RemoveFrameworkResponse, ) async def", "\"governance_framework\": request.governance_framework, } response = await self.add_framework(**request_kwargs) await stream.send_message(response) async def __rpc_remove_framework(self, stream:", "def check_issuer_status( self, *, governance_framework_uri: str = \"\", did_uri: str = \"\", x509_cert:", "credential_type_uri: str = \"\", governance_framework_uri: str = \"\", ) -> \"UnregisterIssuerResponse\": request =", "UnregisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class UnregisterVerifierRequest(betterproto.Message): did_uri: str = betterproto.string_field(1,", "presentation_type_uri request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\", request, UnregisterVerifierResponse, ) async def", ") async def fetch_data( self, *, governance_framework_uri: str = \"\", query: str =", "= betterproto.bool_field(2) continuation_token: str = betterproto.string_field(3) class TrustRegistryStub(betterproto.ServiceStub): async def add_framework( self, *,", "\"UnregisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def unregister_verifier( self, did_uri: str, x509_cert: str, presentation_type_uri: str,", "valid_from_utc request.valid_until_utc = valid_until_utc request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\", request, RegisterVerifierResponse,", "None: request = await stream.recv_message() request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\":", "repr=False) class FetchDataResponse(betterproto.Message): response_json: str = betterproto.string_field(1) has_more_results: bool = betterproto.bool_field(2) continuation_token: str", "remove_framework( 
self, governance_framework: \"GovernanceFramework\" ) -> \"RemoveFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def search_registry( self,", "str, governance_framework_uri: str, ) -> \"UnregisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def unregister_verifier( self, did_uri:", "class AddFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class RemoveFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\" =", "@dataclass(eq=False, repr=False) class CheckIssuerStatusRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) did_uri: str = betterproto.string_field(2, group=\"member\")", "request.credential_type_uri = credential_type_uri request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\", request, UnregisterIssuerResponse, )", "\"\", credential_type_uri: str = \"\", ) -> \"CheckIssuerStatusResponse\": request = CheckIssuerStatusRequest() request.governance_framework_uri =", "= await stream.recv_message() request_kwargs = { \"governance_framework_uri\": request.governance_framework_uri, \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\":", "AsyncIterator, Dict import betterproto from betterproto.grpc.grpclib_server import ServiceBase import grpclib class RegistrationStatus(betterproto.Enum): CURRENT", "None: request = await stream.recv_message() request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\":", "UnregisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\": grpclib.const.Handler( self.__rpc_unregister_verifier, grpclib.const.Cardinality.UNARY_UNARY, UnregisterVerifierRequest, UnregisterVerifierResponse, ), 
\"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\": grpclib.const.Handler( self.__rpc_check_issuer_status, grpclib.const.Cardinality.UNARY_UNARY,", "str = \"\", ) -> \"UnregisterVerifierResponse\": request = UnregisterVerifierRequest() if did_uri: request.did_uri =", "} response = await self.unregister_issuer(**request_kwargs) await stream.send_message(response) async def __rpc_unregister_verifier(self, stream: grpclib.server.Stream) ->", "str, ) -> \"UnregisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def unregister_verifier( self, did_uri: str, x509_cert:", "betterproto.enum_field(1) @dataclass(eq=False, repr=False) class SearchRegistryRequest(betterproto.Message): query: str = betterproto.string_field(1) continuation_token: str = betterproto.string_field(2)", "await stream.recv_message() request_kwargs = { \"governance_framework\": request.governance_framework, } response = await self.add_framework(**request_kwargs) await", "betterproto.string_field(2, group=\"authority\") presentation_type_uri: str = betterproto.string_field(10) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class", "return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\", request, UnregisterIssuerResponse, ) async def unregister_verifier( self, *, did_uri:", "= x509_cert request.presentation_type_uri = presentation_type_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\", request, CheckVerifierStatusResponse, ) async", "-> None: request = await stream.recv_message() request_kwargs = { \"governance_framework\": request.governance_framework, } response", "str = \"\", x509_cert: str = \"\", credential_type_uri: str = \"\", ) ->", "\"\", governance_framework_uri: str = \"\", ) -> \"UnregisterIssuerResponse\": request = UnregisterIssuerRequest() if did_uri:", "\"valid_from_utc\": request.valid_from_utc, 
\"valid_until_utc\": request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.register_verifier(**request_kwargs) await stream.send_message(response)", "None: request = await stream.recv_message() request_kwargs = { \"governance_framework_uri\": request.governance_framework_uri, \"query\": request.query, }", "request.continuation_token = continuation_token if options is not None: request.options = options return await", "presentation_type_uri request.valid_from_utc = valid_from_utc request.valid_until_utc = valid_until_utc request.governance_framework_uri = governance_framework_uri return await self._unary_unary(", "= { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"governance_framework_uri\": request.governance_framework_uri, } response =", "\"\", ) -> \"RegisterVerifierResponse\": request = RegisterVerifierRequest() if did_uri: request.did_uri = did_uri if", "str = \"\", governance_framework_uri: str = \"\", ) -> \"UnregisterIssuerResponse\": request = UnregisterIssuerRequest()", "request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri request.governance_framework_uri =", "did_uri: str = \"\", x509_cert: str = \"\", presentation_type_uri: str = \"\", )", ") async def check_verifier_status( self, *, governance_framework_uri: str = \"\", did_uri: str =", "governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\", request, RegisterIssuerResponse, ) async def register_verifier( self, *,", "await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\", request, RemoveFrameworkResponse, ) async def search_registry( self, *, query: str", "grpclib.const.Cardinality.UNARY_UNARY, CheckIssuerStatusRequest, CheckIssuerStatusResponse, ), 
\"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\": grpclib.const.Handler( self.__rpc_check_verifier_status, grpclib.const.Cardinality.UNARY_UNARY, CheckVerifierStatusRequest, CheckVerifierStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/FetchData\": grpclib.const.Handler(", "str, ) -> \"UnregisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def check_issuer_status( self, governance_framework_uri: str, did_uri:", "raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_verifier( self, did_uri: str, x509_cert: str, presentation_type_uri: str, valid_from_utc:", "self, did_uri: str, x509_cert: str, presentation_type_uri: str, governance_framework_uri: str, ) -> \"UnregisterVerifierResponse\": raise", "self.__rpc_register_issuer, grpclib.const.Cardinality.UNARY_UNARY, RegisterIssuerRequest, RegisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\": grpclib.const.Handler( self.__rpc_register_verifier, grpclib.const.Cardinality.UNARY_UNARY, RegisterVerifierRequest, RegisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\":", "str = betterproto.string_field(2, group=\"authority\") presentation_type_uri: str = betterproto.string_field(10) valid_from_utc: int = betterproto.uint64_field(11) valid_until_utc:", "= betterproto.string_field(2, group=\"authority\") presentation_type_uri: str = betterproto.string_field(10) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False)", "\"\", credential_type_uri: str = \"\", valid_from_utc: int = 0, valid_until_utc: int = 0,", "governance_framework_uri: str, ) -> \"UnregisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def check_issuer_status( self, governance_framework_uri: str,", "} response = await self.search_registry(**request_kwargs) await stream.send_message(response) async def 
__rpc_register_issuer(self, stream: grpclib.server.Stream) ->", "str, x509_cert: str, credential_type_uri: str, ) -> \"CheckIssuerStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def check_verifier_status(", "request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"governance_framework_uri\": request.governance_framework_uri, } response", "await self.unregister_verifier(**request_kwargs) await stream.send_message(response) async def __rpc_check_issuer_status(self, stream: grpclib.server.Stream) -> None: request =", "\"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\", request, RegisterIssuerResponse, ) async def register_verifier( self, *, did_uri: str = \"\",", "\"GovernanceFramework\" = None ) -> \"RemoveFrameworkResponse\": request = RemoveFrameworkRequest() if governance_framework is not", "services/trust-registry/v1/trust-registry.proto # plugin: python-betterproto from dataclasses import dataclass from typing import AsyncIterator, Dict", "= betterproto.uint64_field(12) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class RegisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" =", "governance_framework return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\", request, RemoveFrameworkResponse, ) async def search_registry( self, *,", "1 TERMINATED = 2 REVOKED = 3 NOT_FOUND = 10 @dataclass(eq=False, repr=False) class", "stream.recv_message() request_kwargs = { \"governance_framework_uri\": request.governance_framework_uri, \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, }", "async def check_verifier_status( self, governance_framework_uri: str, did_uri: str, x509_cert: str, presentation_type_uri: str, )", "str = \"\", x509_cert: str = \"\", 
presentation_type_uri: str = \"\", governance_framework_uri: str", "repr=False) class RemoveFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class SearchRegistryRequest(betterproto.Message): query: str", "*, governance_framework: \"GovernanceFramework\" = None ) -> \"RemoveFrameworkResponse\": request = RemoveFrameworkRequest() if governance_framework", "3 NOT_FOUND = 10 @dataclass(eq=False, repr=False) class AddFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False,", "grpclib.const.Handler( self.__rpc_search_registry, grpclib.const.Cardinality.UNARY_UNARY, SearchRegistryRequest, SearchRegistryResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\": grpclib.const.Handler( self.__rpc_register_issuer, grpclib.const.Cardinality.UNARY_UNARY, RegisterIssuerRequest, RegisterIssuerResponse, ),", "request.valid_from_utc = valid_from_utc request.valid_until_utc = valid_until_utc request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\",", "stream.send_message(response) async def __rpc_search_registry(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs", "stream.send_message(response) async def __rpc_check_verifier_status(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs", "-> \"SearchRegistryResponse\": request = SearchRegistryRequest() request.query = query request.continuation_token = continuation_token if options", "credential_type_uri: str, ) -> \"CheckIssuerStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def check_verifier_status( self, governance_framework_uri: str,", "} response = await self.check_verifier_status(**request_kwargs) await 
stream.send_message(response) async def __rpc_fetch_data(self, stream: grpclib.server.Stream) ->", "\"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\": grpclib.const.Handler( self.__rpc_check_issuer_status, grpclib.const.Cardinality.UNARY_UNARY, CheckIssuerStatusRequest, CheckIssuerStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\": grpclib.const.Handler( self.__rpc_check_verifier_status, grpclib.const.Cardinality.UNARY_UNARY, CheckVerifierStatusRequest, CheckVerifierStatusResponse,", "None: request = await stream.recv_message() request_kwargs = { \"query\": request.query, \"continuation_token\": request.continuation_token, \"options\":", "-> \"SearchRegistryResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_issuer( self, did_uri: str, x509_cert: str, credential_type_uri:", "group=\"member\") x509_cert: str = betterproto.string_field(3, group=\"member\") presentation_type_uri: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class", "betterproto.string_field(4) @dataclass(eq=False, repr=False) class CheckVerifierStatusResponse(betterproto.Message): status: \"RegistrationStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class FetchDataRequest(betterproto.Message):", "typing import AsyncIterator, Dict import betterproto from betterproto.grpc.grpclib_server import ServiceBase import grpclib class", "betterproto.string_field(4) @dataclass(eq=False, repr=False) class CheckIssuerStatusResponse(betterproto.Message): status: \"RegistrationStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class CheckVerifierStatusRequest(betterproto.Message):", "def fetch_data( self, *, governance_framework_uri: str = \"\", query: str = \"\" )", "response_json: str = betterproto.string_field(1) has_more_results: bool = betterproto.bool_field(2) continuation_token: str = betterproto.string_field(3) class", "request_kwargs = { 
\"governance_framework_uri\": request.governance_framework_uri, \"query\": request.query, } await self._call_rpc_handler_server_stream( self.fetch_data, stream, request_kwargs,", "@dataclass(eq=False, repr=False) class SearchRegistryResponse(betterproto.Message): items_json: str = betterproto.string_field(1) has_more: bool = betterproto.bool_field(2) count:", "query: str = betterproto.string_field(1) continuation_token: str = betterproto.string_field(2) options: \"__common_v1__.RequestOptions\" = betterproto.message_field(5) @dataclass(eq=False,", "= x509_cert request.credential_type_uri = credential_type_uri request.valid_from_utc = valid_from_utc request.valid_until_utc = valid_until_utc request.governance_framework_uri =", "raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def search_registry( self, query: str, continuation_token: str, options: \"__common_v1__.RequestOptions\", )", "str = betterproto.string_field(1) continuation_token: str = betterproto.string_field(2) options: \"__common_v1__.RequestOptions\" = betterproto.message_field(5) @dataclass(eq=False, repr=False)", "x509_cert: request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri request.valid_from_utc = valid_from_utc request.valid_until_utc = valid_until_utc", "response = await self.unregister_issuer(**request_kwargs) await stream.send_message(response) async def __rpc_unregister_verifier(self, stream: grpclib.server.Stream) -> None:", "request = await stream.recv_message() request_kwargs = { \"governance_framework_uri\": request.governance_framework_uri, \"query\": request.query, } await", "betterproto.string_field(2, group=\"authority\") credential_type_uri: str = betterproto.string_field(10) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class", "request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri return await", "options: 
\"__common_v1__.RequestOptions\", ) -> \"SearchRegistryResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_issuer( self, did_uri: str,", "\"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.unregister_issuer(**request_kwargs) await stream.send_message(response)", "UnregisterVerifierRequest() if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri =", "): yield response class TrustRegistryBase(ServiceBase): async def add_framework( self, governance_framework: \"GovernanceFramework\" ) ->", "betterproto.bool_field(2) continuation_token: str = betterproto.string_field(3) class TrustRegistryStub(betterproto.ServiceStub): async def add_framework( self, *, governance_framework:", "= options return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\", request, SearchRegistryResponse, ) async def register_issuer( self,", "betterproto.enum_field(1) @dataclass(eq=False, repr=False) class CheckVerifierStatusRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) did_uri: str = betterproto.string_field(2,", "valid_until_utc: int, governance_framework_uri: str, ) -> \"RegisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_verifier( self,", "def __rpc_fetch_data(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = {", "int = 0, governance_framework_uri: str = \"\", ) -> \"RegisterVerifierResponse\": request = RegisterVerifierRequest()", "\"\", x509_cert: str = \"\", credential_type_uri: str = \"\", ) -> \"CheckIssuerStatusResponse\": request", "request = await stream.recv_message() request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": 
request.credential_type_uri,", "= x509_cert request.presentation_type_uri = presentation_type_uri request.valid_from_utc = valid_from_utc request.valid_until_utc = valid_until_utc request.governance_framework_uri =", "self.__rpc_register_verifier, grpclib.const.Cardinality.UNARY_UNARY, RegisterVerifierRequest, RegisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\": grpclib.const.Handler( self.__rpc_unregister_issuer, grpclib.const.Cardinality.UNARY_UNARY, UnregisterIssuerRequest, UnregisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\":", ") def __mapping__(self) -> Dict[str, grpclib.const.Handler]: return { \"/services.trustregistry.v1.TrustRegistry/AddFramework\": grpclib.const.Handler( self.__rpc_add_framework, grpclib.const.Cardinality.UNARY_UNARY, AddFrameworkRequest,", "Dict[str, grpclib.const.Handler]: return { \"/services.trustregistry.v1.TrustRegistry/AddFramework\": grpclib.const.Handler( self.__rpc_add_framework, grpclib.const.Cardinality.UNARY_UNARY, AddFrameworkRequest, AddFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\": grpclib.const.Handler(", "UnregisterVerifierRequest, UnregisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\": grpclib.const.Handler( self.__rpc_check_issuer_status, grpclib.const.Cardinality.UNARY_UNARY, CheckIssuerStatusRequest, CheckIssuerStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\": grpclib.const.Handler( self.__rpc_check_verifier_status,", "def __rpc_add_framework(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = {", "stream.recv_message() request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, \"governance_framework_uri\": request.governance_framework_uri, }", "2 REVOKED = 3 NOT_FOUND = 10 @dataclass(eq=False, 
repr=False) class AddFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\"", "did_uri: str = \"\", x509_cert: str = \"\", presentation_type_uri: str = \"\", valid_from_utc:", "group=\"member\") credential_type_uri: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class CheckIssuerStatusResponse(betterproto.Message): status: \"RegistrationStatus\" = betterproto.enum_field(1)", "request_kwargs = { \"governance_framework\": request.governance_framework, } response = await self.remove_framework(**request_kwargs) await stream.send_message(response) async", "request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.unregister_verifier(**request_kwargs) await stream.send_message(response) async", "request.valid_until_utc = valid_until_utc request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\", request, RegisterVerifierResponse, )", "stream.send_message(response) async def __rpc_register_verifier(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs", "request.governance_framework_uri, } response = await self.register_issuer(**request_kwargs) await stream.send_message(response) async def __rpc_register_verifier(self, stream: grpclib.server.Stream)", "governance_framework: \"GovernanceFramework\" = None ) -> \"RemoveFrameworkResponse\": request = RemoveFrameworkRequest() if governance_framework is", "= { \"query\": request.query, \"continuation_token\": request.continuation_token, \"options\": request.options, } response = await self.search_registry(**request_kwargs)", "betterproto.string_field(2, group=\"member\") x509_cert: str = betterproto.string_field(3, group=\"member\") credential_type_uri: str = betterproto.string_field(4) @dataclass(eq=False, repr=False)", "x509_cert: 
request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri request.valid_from_utc = valid_from_utc request.valid_until_utc = valid_until_utc", "class GovernanceFramework(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) trust_registry_uri: str = betterproto.string_field(2) description: str =", "betterproto.enum_field(1) @dataclass(eq=False, repr=False) class UnregisterIssuerRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str =", "\"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\": grpclib.const.Handler( self.__rpc_unregister_issuer, grpclib.const.Cardinality.UNARY_UNARY, UnregisterIssuerRequest, UnregisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\": grpclib.const.Handler( self.__rpc_unregister_verifier, grpclib.const.Cardinality.UNARY_UNARY, UnregisterVerifierRequest, UnregisterVerifierResponse,", "= \"\", ) -> \"CheckVerifierStatusResponse\": request = CheckVerifierStatusRequest() request.governance_framework_uri = governance_framework_uri if did_uri:", "str = betterproto.string_field(2, group=\"member\") x509_cert: str = betterproto.string_field(3, group=\"member\") credential_type_uri: str = betterproto.string_field(4)", "self, *, governance_framework_uri: str = \"\", query: str = \"\" ) -> AsyncIterator[\"FetchDataResponse\"]:", "\"/services.trustregistry.v1.TrustRegistry/RemoveFramework\": grpclib.const.Handler( self.__rpc_remove_framework, grpclib.const.Cardinality.UNARY_UNARY, RemoveFrameworkRequest, RemoveFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\": grpclib.const.Handler( self.__rpc_search_registry, grpclib.const.Cardinality.UNARY_UNARY, SearchRegistryRequest, SearchRegistryResponse,", "__rpc_register_issuer(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = { \"did_uri\":", "\"\", ) -> \"RegisterIssuerResponse\": 
request = RegisterIssuerRequest() if did_uri: request.did_uri = did_uri if", "request.query, \"continuation_token\": request.continuation_token, \"options\": request.options, } response = await self.search_registry(**request_kwargs) await stream.send_message(response) async", "__rpc_unregister_issuer(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = { \"did_uri\":", "betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") credential_type_uri: str = betterproto.string_field(10) valid_from_utc: int", "= did_uri if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri return await self._unary_unary(", "if governance_framework is not None: request.governance_framework = governance_framework return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/AddFramework\", request,", "request = await stream.recv_message() request_kwargs = { \"query\": request.query, \"continuation_token\": request.continuation_token, \"options\": request.options,", "repr=False) class CheckVerifierStatusResponse(betterproto.Message): status: \"RegistrationStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class FetchDataRequest(betterproto.Message): governance_framework_uri: str", "grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def unregister_verifier( self, did_uri: str, x509_cert: str, presentation_type_uri: str, governance_framework_uri: str,", "valid_from_utc: int = betterproto.uint64_field(11) valid_until_utc: int = betterproto.uint64_field(12) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False,", "None: request = await stream.recv_message() request_kwargs = { \"governance_framework_uri\": request.governance_framework_uri, \"did_uri\": request.did_uri, \"x509_cert\":", "= did_uri if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri = 
presentation_type_uri request.valid_from_utc = valid_from_utc", "\"governance_framework_uri\": request.governance_framework_uri, \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, } response = await self.check_verifier_status(**request_kwargs)", "= SearchRegistryRequest() request.query = query request.continuation_token = continuation_token if options is not None:", "betterproto.uint64_field(12) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class RegisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1)", "= 0, governance_framework_uri: str = \"\", ) -> \"RegisterVerifierResponse\": request = RegisterVerifierRequest() if", "governance_framework_uri: str, query: str ) -> AsyncIterator[\"FetchDataResponse\"]: raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def __rpc_add_framework(self, stream:", "= 0, valid_until_utc: int = 0, governance_framework_uri: str = \"\", ) -> \"RegisterVerifierResponse\":", "= await self.remove_framework(**request_kwargs) await stream.send_message(response) async def __rpc_search_registry(self, stream: grpclib.server.Stream) -> None: request", "str = betterproto.string_field(1) trust_registry_uri: str = betterproto.string_field(2) description: str = betterproto.string_field(3) @dataclass(eq=False, repr=False)", "dataclasses import dataclass from typing import AsyncIterator, Dict import betterproto from betterproto.grpc.grpclib_server import", "AddFrameworkResponse, ) async def remove_framework( self, *, governance_framework: \"GovernanceFramework\" = None ) ->", "options return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\", request, SearchRegistryResponse, ) async def register_issuer( self, *,", "grpclib.const.Handler( self.__rpc_unregister_issuer, grpclib.const.Cardinality.UNARY_UNARY, UnregisterIssuerRequest, 
UnregisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\": grpclib.const.Handler( self.__rpc_unregister_verifier, grpclib.const.Cardinality.UNARY_UNARY, UnregisterVerifierRequest, UnregisterVerifierResponse, ),", "\"\", presentation_type_uri: str = \"\", ) -> \"CheckVerifierStatusResponse\": request = CheckVerifierStatusRequest() request.governance_framework_uri =", "governance_framework_uri: str = \"\", ) -> \"RegisterVerifierResponse\": request = RegisterVerifierRequest() if did_uri: request.did_uri", "\"\" ) -> AsyncIterator[\"FetchDataResponse\"]: request = FetchDataRequest() request.governance_framework_uri = governance_framework_uri request.query = query", "= await self.unregister_verifier(**request_kwargs) await stream.send_message(response) async def __rpc_check_issuer_status(self, stream: grpclib.server.Stream) -> None: request", "\"/services.trustregistry.v1.TrustRegistry/FetchData\": grpclib.const.Handler( self.__rpc_fetch_data, grpclib.const.Cardinality.UNARY_STREAM, FetchDataRequest, FetchDataResponse, ), } from ...common import v1 as", "did_uri: str = \"\", x509_cert: str = \"\", credential_type_uri: str = \"\", valid_from_utc:", "betterproto.string_field(20) @dataclass(eq=False, repr=False) class RegisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class RegisterVerifierRequest(betterproto.Message):", "= betterproto.string_field(2, group=\"authority\") credential_type_uri: str = betterproto.string_field(10) valid_from_utc: int = betterproto.uint64_field(11) valid_until_utc: int", "} response = await self.register_issuer(**request_kwargs) await stream.send_message(response) async def __rpc_register_verifier(self, stream: grpclib.server.Stream) ->", "def __rpc_unregister_issuer(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = {", "governance_framework_uri: str = 
betterproto.string_field(1) trust_registry_uri: str = betterproto.string_field(2) description: str = betterproto.string_field(3) @dataclass(eq=False,", "str, presentation_type_uri: str, valid_from_utc: int, valid_until_utc: int, governance_framework_uri: str, ) -> \"RegisterVerifierResponse\": raise", "x509_cert request.presentation_type_uri = presentation_type_uri request.valid_from_utc = valid_from_utc request.valid_until_utc = valid_until_utc request.governance_framework_uri = governance_framework_uri", "did_uri if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri request.governance_framework_uri = governance_framework_uri return", "self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\", request, CheckIssuerStatusResponse, ) async def check_verifier_status( self, *, governance_framework_uri: str =", "not None: request.options = options return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\", request, SearchRegistryResponse, ) async", "request.query, } await self._call_rpc_handler_server_stream( self.fetch_data, stream, request_kwargs, ) def __mapping__(self) -> Dict[str, grpclib.const.Handler]:", "request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\": request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, } response = await", "credential_type_uri: str = betterproto.string_field(10) valid_from_utc: int = betterproto.uint64_field(11) valid_until_utc: int = betterproto.uint64_field(12) governance_framework_uri:", ") -> \"UnregisterVerifierResponse\": request = UnregisterVerifierRequest() if did_uri: request.did_uri = did_uri if x509_cert:", "@dataclass(eq=False, repr=False) class RegisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class 
UnregisterIssuerRequest(betterproto.Message): did_uri:", "status: \"RegistrationStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class CheckVerifierStatusRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) did_uri:", "\"\", ) -> \"CheckVerifierStatusResponse\": request = CheckVerifierStatusRequest() request.governance_framework_uri = governance_framework_uri if did_uri: request.did_uri", "x509_cert: str, presentation_type_uri: str, valid_from_utc: int, valid_until_utc: int, governance_framework_uri: str, ) -> \"RegisterVerifierResponse\":", "governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class RegisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False,", "-> Dict[str, grpclib.const.Handler]: return { \"/services.trustregistry.v1.TrustRegistry/AddFramework\": grpclib.const.Handler( self.__rpc_add_framework, grpclib.const.Cardinality.UNARY_UNARY, AddFrameworkRequest, AddFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\":", "grpclib.const.Handler( self.__rpc_check_verifier_status, grpclib.const.Cardinality.UNARY_UNARY, CheckVerifierStatusRequest, CheckVerifierStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/FetchData\": grpclib.const.Handler( self.__rpc_fetch_data, grpclib.const.Cardinality.UNARY_STREAM, FetchDataRequest, FetchDataResponse, ),", "grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def check_issuer_status( self, governance_framework_uri: str, did_uri: str, x509_cert: str, credential_type_uri: str,", "did_uri: str, x509_cert: str, credential_type_uri: str, ) -> \"CheckIssuerStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def", "credential_type_uri: str = \"\", valid_from_utc: int = 0, valid_until_utc: int = 0, governance_framework_uri:", "x509_cert request.credential_type_uri = 
credential_type_uri request.valid_from_utc = valid_from_utc request.valid_until_utc = valid_until_utc request.governance_framework_uri = governance_framework_uri", "grpclib.const.Cardinality.UNARY_UNARY, AddFrameworkRequest, AddFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\": grpclib.const.Handler( self.__rpc_remove_framework, grpclib.const.Cardinality.UNARY_UNARY, RemoveFrameworkRequest, RemoveFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\": grpclib.const.Handler(", "from typing import AsyncIterator, Dict import betterproto from betterproto.grpc.grpclib_server import ServiceBase import grpclib", "await self.register_verifier(**request_kwargs) await stream.send_message(response) async def __rpc_unregister_issuer(self, stream: grpclib.server.Stream) -> None: request =", "= \"\", valid_from_utc: int = 0, valid_until_utc: int = 0, governance_framework_uri: str =", "= UnregisterIssuerRequest() if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.credential_type_uri", "betterproto.message_field(5) @dataclass(eq=False, repr=False) class SearchRegistryResponse(betterproto.Message): items_json: str = betterproto.string_field(1) has_more: bool = betterproto.bool_field(2)", "check_issuer_status( self, *, governance_framework_uri: str = \"\", did_uri: str = \"\", x509_cert: str", "__mapping__(self) -> Dict[str, grpclib.const.Handler]: return { \"/services.trustregistry.v1.TrustRegistry/AddFramework\": grpclib.const.Handler( self.__rpc_add_framework, grpclib.const.Cardinality.UNARY_UNARY, AddFrameworkRequest, AddFrameworkResponse, ),", "request.governance_framework_uri, } response = await self.register_verifier(**request_kwargs) await stream.send_message(response) async def __rpc_unregister_issuer(self, stream: grpclib.server.Stream)", "-> \"RemoveFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def search_registry( self, query: str, 
continuation_token: str, options:", "response = await self.register_verifier(**request_kwargs) await stream.send_message(response) async def __rpc_unregister_issuer(self, stream: grpclib.server.Stream) -> None:", "if options is not None: request.options = options return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\", request,", "raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def unregister_issuer( self, did_uri: str, x509_cert: str, credential_type_uri: str, governance_framework_uri:", "\"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\": request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, } response =", "SearchRegistryResponse(betterproto.Message): items_json: str = betterproto.string_field(1) has_more: bool = betterproto.bool_field(2) count: int = betterproto.int32_field(3)", "request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\", request, CheckIssuerStatusResponse, )", "group=\"authority\") presentation_type_uri: str = betterproto.string_field(10) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class UnregisterVerifierResponse(betterproto.Message):", "-> \"AddFrameworkResponse\": request = AddFrameworkRequest() if governance_framework is not None: request.governance_framework = governance_framework", "= betterproto.string_field(10) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class UnregisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" =", "governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\", request, UnregisterIssuerResponse, ) async def unregister_verifier( 
self, *,", "await stream.recv_message() request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"presentation_type_uri\": request.presentation_type_uri, \"valid_from_utc\": request.valid_from_utc,", "status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class SearchRegistryRequest(betterproto.Message): query: str = betterproto.string_field(1) continuation_token:", "repr=False) class RemoveFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False, repr=False) class RemoveFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\"", "check_verifier_status( self, *, governance_framework_uri: str = \"\", did_uri: str = \"\", x509_cert: str", "RegisterIssuerRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") credential_type_uri: str", "betterproto.string_field(20) @dataclass(eq=False, repr=False) class RegisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class UnregisterIssuerRequest(betterproto.Message):", "} response = await self.register_verifier(**request_kwargs) await stream.send_message(response) async def __rpc_unregister_issuer(self, stream: grpclib.server.Stream) ->", "\"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class CheckIssuerStatusRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) did_uri: str", "\"RegistrationStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class FetchDataRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) query: str", "repr=False) class AddFrameworkRequest(betterproto.Message): governance_framework: 
\"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False, repr=False) class AddFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\"", "= x509_cert request.presentation_type_uri = presentation_type_uri request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\", request,", "betterproto.string_field(1) query: str = betterproto.string_field(2) @dataclass(eq=False, repr=False) class FetchDataResponse(betterproto.Message): response_json: str = betterproto.string_field(1)", ") -> \"SearchRegistryResponse\": request = SearchRegistryRequest() request.query = query request.continuation_token = continuation_token if", "search_registry( self, query: str, continuation_token: str, options: \"__common_v1__.RequestOptions\", ) -> \"SearchRegistryResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)", "query: str = \"\" ) -> AsyncIterator[\"FetchDataResponse\"]: request = FetchDataRequest() request.governance_framework_uri = governance_framework_uri", "governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class UnregisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False,", "), \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\": grpclib.const.Handler( self.__rpc_unregister_issuer, grpclib.const.Cardinality.UNARY_UNARY, UnregisterIssuerRequest, UnregisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\": grpclib.const.Handler( self.__rpc_unregister_verifier, grpclib.const.Cardinality.UNARY_UNARY, UnregisterVerifierRequest,", "await self.register_issuer(**request_kwargs) await stream.send_message(response) async def __rpc_register_verifier(self, stream: grpclib.server.Stream) -> None: request =", "str, query: str ) -> AsyncIterator[\"FetchDataResponse\"]: raise 
grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def __rpc_add_framework(self, stream: grpclib.server.Stream)", "str = betterproto.string_field(1) has_more: bool = betterproto.bool_field(2) count: int = betterproto.int32_field(3) continuation_token: str", "did_uri: str = betterproto.string_field(2, group=\"member\") x509_cert: str = betterproto.string_field(3, group=\"member\") credential_type_uri: str =", "-> \"UnregisterVerifierResponse\": request = UnregisterVerifierRequest() if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert", "async def unregister_issuer( self, *, did_uri: str = \"\", x509_cert: str = \"\",", "\"\", x509_cert: str = \"\", credential_type_uri: str = \"\", governance_framework_uri: str = \"\",", "x509_cert: str, credential_type_uri: str, ) -> \"CheckIssuerStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def check_verifier_status( self,", "presentation_type_uri: str = \"\", ) -> \"CheckVerifierStatusResponse\": request = CheckVerifierStatusRequest() request.governance_framework_uri = governance_framework_uri", "valid_from_utc: int, valid_until_utc: int, governance_framework_uri: str, ) -> \"RegisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def", "class CheckVerifierStatusResponse(betterproto.Message): status: \"RegistrationStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class FetchDataRequest(betterproto.Message): governance_framework_uri: str =", "return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\", request, RegisterIssuerResponse, ) async def register_verifier( self, *, did_uri:", "= betterproto.int32_field(3) continuation_token: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class GovernanceFramework(betterproto.Message): governance_framework_uri: str =", "async def check_verifier_status( self, *, governance_framework_uri: str = \"\", did_uri: str = 
\"\",", "request, CheckIssuerStatusResponse, ) async def check_verifier_status( self, *, governance_framework_uri: str = \"\", did_uri:", "return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\", request, UnregisterVerifierResponse, ) async def check_issuer_status( self, *, governance_framework_uri:", "await stream.recv_message() request_kwargs = { \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"valid_from_utc\": request.valid_from_utc,", "str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class CheckVerifierStatusResponse(betterproto.Message): status: \"RegistrationStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False)", "x509_cert: str = betterproto.string_field(2, group=\"authority\") credential_type_uri: str = betterproto.string_field(10) governance_framework_uri: str = betterproto.string_field(20)", "= 1 TERMINATED = 2 REVOKED = 3 NOT_FOUND = 10 @dataclass(eq=False, repr=False)", "betterproto.string_field(3) @dataclass(eq=False, repr=False) class RegisterIssuerRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str =", "= \"\", x509_cert: str = \"\", presentation_type_uri: str = \"\", ) -> \"CheckVerifierStatusResponse\":", "self.__rpc_remove_framework, grpclib.const.Cardinality.UNARY_UNARY, RemoveFrameworkRequest, RemoveFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\": grpclib.const.Handler( self.__rpc_search_registry, grpclib.const.Cardinality.UNARY_UNARY, SearchRegistryRequest, SearchRegistryResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\":", "async def fetch_data( self, *, governance_framework_uri: str = \"\", query: str = \"\"", "request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"governance_framework_uri\": request.governance_framework_uri, } response = await 
self.unregister_issuer(**request_kwargs) await stream.send_message(response) async", "is not None: request.options = options return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\", request, SearchRegistryResponse, )", "*, query: str = \"\", continuation_token: str = \"\", options: \"__common_v1__.RequestOptions\" = None,", "), \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\": grpclib.const.Handler( self.__rpc_search_registry, grpclib.const.Cardinality.UNARY_UNARY, SearchRegistryRequest, SearchRegistryResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\": grpclib.const.Handler( self.__rpc_register_issuer, grpclib.const.Cardinality.UNARY_UNARY, RegisterIssuerRequest,", "FetchDataRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) query: str = betterproto.string_field(2) @dataclass(eq=False, repr=False) class FetchDataResponse(betterproto.Message):", "request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\", request, UnregisterVerifierResponse, ) async def check_issuer_status(", "= did_uri if x509_cert: request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri request.governance_framework_uri = governance_framework_uri", "request.credential_type_uri, } response = await self.check_issuer_status(**request_kwargs) await stream.send_message(response) async def __rpc_check_verifier_status(self, stream: grpclib.server.Stream)", "group=\"member\") presentation_type_uri: str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class CheckVerifierStatusResponse(betterproto.Message): status: \"RegistrationStatus\" = betterproto.enum_field(1)", "request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri request.valid_from_utc = valid_from_utc request.valid_until_utc = valid_until_utc request.governance_framework_uri", 
"grpclib.const.Cardinality.UNARY_UNARY, SearchRegistryRequest, SearchRegistryResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\": grpclib.const.Handler( self.__rpc_register_issuer, grpclib.const.Cardinality.UNARY_UNARY, RegisterIssuerRequest, RegisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\": grpclib.const.Handler(", "= \"\", x509_cert: str = \"\", credential_type_uri: str = \"\", governance_framework_uri: str =", "def fetch_data( self, governance_framework_uri: str, query: str ) -> AsyncIterator[\"FetchDataResponse\"]: raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async", "= \"\", x509_cert: str = \"\", credential_type_uri: str = \"\", valid_from_utc: int =", "request, RemoveFrameworkResponse, ) async def search_registry( self, *, query: str = \"\", continuation_token:", "async def __rpc_unregister_verifier(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs =", "self, governance_framework: \"GovernanceFramework\" ) -> \"AddFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def remove_framework( self, governance_framework:", "\"RegistrationStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class CheckVerifierStatusRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) did_uri: str", "= \"\", ) -> \"UnregisterIssuerResponse\": request = UnregisterIssuerRequest() if did_uri: request.did_uri = did_uri", "CheckVerifierStatusRequest() request.governance_framework_uri = governance_framework_uri if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert =", "\"credential_type_uri\": request.credential_type_uri, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.unregister_issuer(**request_kwargs) await stream.send_message(response) async def", "= betterproto.string_field(20) @dataclass(eq=False, repr=False) class 
RegisterVerifierResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class", ") async def check_issuer_status( self, *, governance_framework_uri: str = \"\", did_uri: str =", "await stream.send_message(response) async def __rpc_unregister_issuer(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message()", "str = \"\", ) -> \"CheckIssuerStatusResponse\": request = CheckIssuerStatusRequest() request.governance_framework_uri = governance_framework_uri if", "\"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\", request, CheckVerifierStatusResponse, ) async def fetch_data( self, *, governance_framework_uri: str = \"\",", "= presentation_type_uri request.valid_from_utc = valid_from_utc request.valid_until_utc = valid_until_utc request.governance_framework_uri = governance_framework_uri return await", "await stream.send_message(response) async def __rpc_fetch_data(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message()", "raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def check_verifier_status( self, governance_framework_uri: str, did_uri: str, x509_cert: str, presentation_type_uri:", "did_uri if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri request.valid_from_utc = valid_from_utc request.valid_until_utc", "request = AddFrameworkRequest() if governance_framework is not None: request.governance_framework = governance_framework return await", "import grpclib class RegistrationStatus(betterproto.Enum): CURRENT = 0 EXPIRED = 1 TERMINATED = 2", ") async def unregister_verifier( self, *, did_uri: str = \"\", x509_cert: str =", "request.presentation_type_uri, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.unregister_verifier(**request_kwargs) await stream.send_message(response) async def __rpc_check_issuer_status(self,", "*, 
governance_framework_uri: str = \"\", did_uri: str = \"\", x509_cert: str = \"\",", "\"\", query: str = \"\" ) -> AsyncIterator[\"FetchDataResponse\"]: request = FetchDataRequest() request.governance_framework_uri =", "request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\",", "fetch_data( self, governance_framework_uri: str, query: str ) -> AsyncIterator[\"FetchDataResponse\"]: raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def", "request = CheckIssuerStatusRequest() request.governance_framework_uri = governance_framework_uri if did_uri: request.did_uri = did_uri if x509_cert:", "\"\", presentation_type_uri: str = \"\", governance_framework_uri: str = \"\", ) -> \"UnregisterVerifierResponse\": request", "if x509_cert: request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\", request,", "@dataclass(eq=False, repr=False) class RegisterIssuerRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2,", "} response = await self.remove_framework(**request_kwargs) await stream.send_message(response) async def __rpc_search_registry(self, stream: grpclib.server.Stream) ->", "\"\", did_uri: str = \"\", x509_cert: str = \"\", presentation_type_uri: str = \"\",", "RemoveFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\": grpclib.const.Handler( self.__rpc_search_registry, grpclib.const.Cardinality.UNARY_UNARY, SearchRegistryRequest, SearchRegistryResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\": grpclib.const.Handler( self.__rpc_register_issuer, grpclib.const.Cardinality.UNARY_UNARY,", "CheckIssuerStatusRequest, 
CheckIssuerStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\": grpclib.const.Handler( self.__rpc_check_verifier_status, grpclib.const.Cardinality.UNARY_UNARY, CheckVerifierStatusRequest, CheckVerifierStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/FetchData\": grpclib.const.Handler( self.__rpc_fetch_data,", "0, valid_until_utc: int = 0, governance_framework_uri: str = \"\", ) -> \"RegisterIssuerResponse\": request", "str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class CheckIssuerStatusResponse(betterproto.Message): status: \"RegistrationStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False)", "stream.recv_message() request_kwargs = { \"governance_framework_uri\": request.governance_framework_uri, \"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, }", "UnregisterVerifierResponse, ) async def check_issuer_status( self, *, governance_framework_uri: str = \"\", did_uri: str", "async def __rpc_fetch_data(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs =", "response = await self.check_issuer_status(**request_kwargs) await stream.send_message(response) async def __rpc_check_verifier_status(self, stream: grpclib.server.Stream) -> None:", "AddFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\": grpclib.const.Handler( self.__rpc_remove_framework, grpclib.const.Cardinality.UNARY_UNARY, RemoveFrameworkRequest, RemoveFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\": grpclib.const.Handler( self.__rpc_search_registry, grpclib.const.Cardinality.UNARY_UNARY,", "str = \"\" ) -> AsyncIterator[\"FetchDataResponse\"]: request = FetchDataRequest() request.governance_framework_uri = governance_framework_uri request.query", "betterproto.string_field(3) class TrustRegistryStub(betterproto.ServiceStub): async def add_framework( 
self, *, governance_framework: \"GovernanceFramework\" = None )", "int, valid_until_utc: int, governance_framework_uri: str, ) -> \"RegisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_verifier(", "= CheckVerifierStatusRequest() request.governance_framework_uri = governance_framework_uri if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert", "group=\"authority\") credential_type_uri: str = betterproto.string_field(10) governance_framework_uri: str = betterproto.string_field(20) @dataclass(eq=False, repr=False) class UnregisterIssuerResponse(betterproto.Message):", "= FetchDataRequest() request.governance_framework_uri = governance_framework_uri request.query = query async for response in self._unary_stream(", "= \"\", did_uri: str = \"\", x509_cert: str = \"\", presentation_type_uri: str =", "@dataclass(eq=False, repr=False) class UnregisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class UnregisterVerifierRequest(betterproto.Message): did_uri:", "class SearchRegistryRequest(betterproto.Message): query: str = betterproto.string_field(1) continuation_token: str = betterproto.string_field(2) options: \"__common_v1__.RequestOptions\" =", "= valid_from_utc request.valid_until_utc = valid_until_utc request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\", request,", "betterproto.string_field(2, group=\"authority\") presentation_type_uri: str = betterproto.string_field(10) valid_from_utc: int = betterproto.uint64_field(11) valid_until_utc: int =", "query async for response in self._unary_stream( \"/services.trustregistry.v1.TrustRegistry/FetchData\", request, FetchDataResponse, ): yield response class", "int = betterproto.int32_field(3) continuation_token: str = betterproto.string_field(4) @dataclass(eq=False, 
repr=False) class GovernanceFramework(betterproto.Message): governance_framework_uri: str", "governance_framework_uri if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri =", "\"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class UnregisterIssuerRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert:", "if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\", request,", "str = \"\", x509_cert: str = \"\", credential_type_uri: str = \"\", governance_framework_uri: str", "betterproto.string_field(2, group=\"member\") x509_cert: str = betterproto.string_field(3, group=\"member\") presentation_type_uri: str = betterproto.string_field(4) @dataclass(eq=False, repr=False)", "def __rpc_remove_framework(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = {", "did_uri: str = \"\", x509_cert: str = \"\", credential_type_uri: str = \"\", governance_framework_uri:", "x509_cert: str = \"\", credential_type_uri: str = \"\", valid_from_utc: int = 0, valid_until_utc:", "RemoveFrameworkResponse, ) async def search_registry( self, *, query: str = \"\", continuation_token: str", "-> \"RegisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_verifier( self, did_uri: str, x509_cert: str, presentation_type_uri:", "int = 0, valid_until_utc: int = 0, governance_framework_uri: str = \"\", ) ->", "str = betterproto.string_field(4) @dataclass(eq=False, repr=False) class GovernanceFramework(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) trust_registry_uri: str", "= betterproto.enum_field(1) @dataclass(eq=False, repr=False) class UnregisterVerifierRequest(betterproto.Message): 
did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str", "async def __rpc_unregister_issuer(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs =", "stream.send_message(response) async def __rpc_fetch_data(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs", "= betterproto.message_field(1) @dataclass(eq=False, repr=False) class RemoveFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class", "grpclib.const.Handler( self.__rpc_remove_framework, grpclib.const.Cardinality.UNARY_UNARY, RemoveFrameworkRequest, RemoveFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\": grpclib.const.Handler( self.__rpc_search_registry, grpclib.const.Cardinality.UNARY_UNARY, SearchRegistryRequest, SearchRegistryResponse, ),", "return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\", request, RemoveFrameworkResponse, ) async def search_registry( self, *, query:", "= await self.search_registry(**request_kwargs) await stream.send_message(response) async def __rpc_register_issuer(self, stream: grpclib.server.Stream) -> None: request", "raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def unregister_verifier( self, did_uri: str, x509_cert: str, presentation_type_uri: str, governance_framework_uri:", "x509_cert: str, credential_type_uri: str, valid_from_utc: int, valid_until_utc: int, governance_framework_uri: str, ) -> \"RegisterIssuerResponse\":", "grpclib.const.Handler( self.__rpc_add_framework, grpclib.const.Cardinality.UNARY_UNARY, AddFrameworkRequest, AddFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\": grpclib.const.Handler( self.__rpc_remove_framework, grpclib.const.Cardinality.UNARY_UNARY, RemoveFrameworkRequest, RemoveFrameworkResponse, ),", "-> None: request = 
await stream.recv_message() request_kwargs = { \"governance_framework_uri\": request.governance_framework_uri, \"query\": request.query,", "did_uri: str = \"\", x509_cert: str = \"\", credential_type_uri: str = \"\", )", "} response = await self.unregister_verifier(**request_kwargs) await stream.send_message(response) async def __rpc_check_issuer_status(self, stream: grpclib.server.Stream) ->", "request.credential_type_uri = credential_type_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\", request, CheckIssuerStatusResponse, ) async def check_verifier_status(", "request.governance_framework = governance_framework return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/AddFramework\", request, AddFrameworkResponse, ) async def remove_framework(", "str, options: \"__common_v1__.RequestOptions\", ) -> \"SearchRegistryResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def register_issuer( self, did_uri:", "governance_framework: \"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False, repr=False) class RemoveFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False,", "await stream.send_message(response) async def __rpc_remove_framework(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message()", "def __mapping__(self) -> Dict[str, grpclib.const.Handler]: return { \"/services.trustregistry.v1.TrustRegistry/AddFramework\": grpclib.const.Handler( self.__rpc_add_framework, grpclib.const.Cardinality.UNARY_UNARY, AddFrameworkRequest, AddFrameworkResponse,", "\"did_uri\": request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, } response = await self.check_issuer_status(**request_kwargs) await stream.send_message(response)", "await stream.recv_message() request_kwargs = { \"governance_framework_uri\": 
request.governance_framework_uri, \"query\": request.query, } await self._call_rpc_handler_server_stream( self.fetch_data,", "governance_framework_uri: str = \"\", did_uri: str = \"\", x509_cert: str = \"\", credential_type_uri:", "= \"\", query: str = \"\" ) -> AsyncIterator[\"FetchDataResponse\"]: request = FetchDataRequest() request.governance_framework_uri", "x509_cert: request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri request.governance_framework_uri = governance_framework_uri return await self._unary_unary(", "\"CheckVerifierStatusResponse\": request = CheckVerifierStatusRequest() request.governance_framework_uri = governance_framework_uri if did_uri: request.did_uri = did_uri if", "\"presentation_type_uri\": request.presentation_type_uri, } response = await self.check_verifier_status(**request_kwargs) await stream.send_message(response) async def __rpc_fetch_data(self, stream:", "async def __rpc_add_framework(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs =", ") -> AsyncIterator[\"FetchDataResponse\"]: request = FetchDataRequest() request.governance_framework_uri = governance_framework_uri request.query = query async", "\"/services.trustregistry.v1.TrustRegistry/SearchRegistry\", request, SearchRegistryResponse, ) async def register_issuer( self, *, did_uri: str = \"\",", "0 EXPIRED = 1 TERMINATED = 2 REVOKED = 3 NOT_FOUND = 10", "grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def search_registry( self, query: str, continuation_token: str, options: \"__common_v1__.RequestOptions\", ) ->", "request.valid_until_utc = valid_until_utc request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterIssuer\", request, RegisterIssuerResponse, )", "= governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/UnregisterVerifier\", request, 
UnregisterVerifierResponse, ) async def check_issuer_status( self,", "= betterproto.string_field(1) query: str = betterproto.string_field(2) @dataclass(eq=False, repr=False) class FetchDataResponse(betterproto.Message): response_json: str =", "self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\", request, RemoveFrameworkResponse, ) async def search_registry( self, *, query: str =", "grpclib.const.Cardinality.UNARY_UNARY, UnregisterVerifierRequest, UnregisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckIssuerStatus\": grpclib.const.Handler( self.__rpc_check_issuer_status, grpclib.const.Cardinality.UNARY_UNARY, CheckIssuerStatusRequest, CheckIssuerStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\": grpclib.const.Handler(", "await stream.send_message(response) async def __rpc_check_verifier_status(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message()", "CheckVerifierStatusRequest, CheckVerifierStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/FetchData\": grpclib.const.Handler( self.__rpc_fetch_data, grpclib.const.Cardinality.UNARY_STREAM, FetchDataRequest, FetchDataResponse, ), } from ...common", "\"/services.trustregistry.v1.TrustRegistry/AddFramework\": grpclib.const.Handler( self.__rpc_add_framework, grpclib.const.Cardinality.UNARY_UNARY, AddFrameworkRequest, AddFrameworkResponse, ), \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\": grpclib.const.Handler( self.__rpc_remove_framework, grpclib.const.Cardinality.UNARY_UNARY, RemoveFrameworkRequest, RemoveFrameworkResponse,", "CheckIssuerStatusRequest() request.governance_framework_uri = governance_framework_uri if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert =", "query: str ) -> AsyncIterator[\"FetchDataResponse\"]: raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def __rpc_add_framework(self, stream: grpclib.server.Stream) ->", "class 
UnregisterVerifierRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") presentation_type_uri:", "status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class CheckIssuerStatusRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) did_uri:", "\"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\": request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.register_issuer(**request_kwargs) await stream.send_message(response)", "\"valid_until_utc\": request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, } response = await self.register_issuer(**request_kwargs) await stream.send_message(response) async def", "= \"\", x509_cert: str = \"\", credential_type_uri: str = \"\", ) -> \"CheckIssuerStatusResponse\":", "grpclib.const.Handler( self.__rpc_register_verifier, grpclib.const.Cardinality.UNARY_UNARY, RegisterVerifierRequest, RegisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\": grpclib.const.Handler( self.__rpc_unregister_issuer, grpclib.const.Cardinality.UNARY_UNARY, UnregisterIssuerRequest, UnregisterIssuerResponse, ),", "RegisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class RegisterVerifierRequest(betterproto.Message): did_uri: str = betterproto.string_field(1,", "governance_framework_uri: str = betterproto.string_field(1) did_uri: str = betterproto.string_field(2, group=\"member\") x509_cert: str = betterproto.string_field(3,", "*, did_uri: str = \"\", x509_cert: str = \"\", presentation_type_uri: str = \"\",", "class TrustRegistryStub(betterproto.ServiceStub): async def add_framework( self, *, governance_framework: \"GovernanceFramework\" = None ) 
->", "if x509_cert: request.x509_cert = x509_cert request.presentation_type_uri = presentation_type_uri request.governance_framework_uri = governance_framework_uri return await", "async def register_issuer( self, *, did_uri: str = \"\", x509_cert: str = \"\",", "class RegisterIssuerRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") credential_type_uri:", "@dataclass(eq=False, repr=False) class RegisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class RegisterVerifierRequest(betterproto.Message): did_uri:", "UnregisterIssuerResponse, ) async def unregister_verifier( self, *, did_uri: str = \"\", x509_cert: str", "did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri return", "str = \"\", governance_framework_uri: str = \"\", ) -> \"UnregisterVerifierResponse\": request = UnregisterVerifierRequest()", "request.did_uri, \"x509_cert\": request.x509_cert, \"credential_type_uri\": request.credential_type_uri, \"valid_from_utc\": request.valid_from_utc, \"valid_until_utc\": request.valid_until_utc, \"governance_framework_uri\": request.governance_framework_uri, } response", "stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs = { \"query\": request.query,", "status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class RegisterVerifierRequest(betterproto.Message): did_uri: str = betterproto.string_field(1, group=\"authority\")", "CheckIssuerStatusRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) did_uri: str = betterproto.string_field(2, group=\"member\") x509_cert: str =", "RegisterVerifierRequest() if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = 
x509_cert request.presentation_type_uri =", "register_issuer( self, *, did_uri: str = \"\", x509_cert: str = \"\", credential_type_uri: str", "def register_issuer( self, *, did_uri: str = \"\", x509_cert: str = \"\", credential_type_uri:", "governance_framework_uri: str = \"\", did_uri: str = \"\", x509_cert: str = \"\", presentation_type_uri:", "str, ) -> \"RegisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def unregister_issuer( self, did_uri: str, x509_cert:", "grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def check_verifier_status( self, governance_framework_uri: str, did_uri: str, x509_cert: str, presentation_type_uri: str,", "= await stream.recv_message() request_kwargs = { \"governance_framework\": request.governance_framework, } response = await self.remove_framework(**request_kwargs)", "stream.send_message(response) async def __rpc_unregister_verifier(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs", "GovernanceFramework(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) trust_registry_uri: str = betterproto.string_field(2) description: str = betterproto.string_field(3)", "), \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\": grpclib.const.Handler( self.__rpc_register_verifier, grpclib.const.Cardinality.UNARY_UNARY, RegisterVerifierRequest, RegisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\": grpclib.const.Handler( self.__rpc_unregister_issuer, grpclib.const.Cardinality.UNARY_UNARY, UnregisterIssuerRequest,", "grpclib.const.Cardinality.UNARY_UNARY, CheckVerifierStatusRequest, CheckVerifierStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/FetchData\": grpclib.const.Handler( self.__rpc_fetch_data, grpclib.const.Cardinality.UNARY_STREAM, FetchDataRequest, FetchDataResponse, ), } from", "= betterproto.string_field(3) class 
TrustRegistryStub(betterproto.ServiceStub): async def add_framework( self, *, governance_framework: \"GovernanceFramework\" = None", "remove_framework( self, *, governance_framework: \"GovernanceFramework\" = None ) -> \"RemoveFrameworkResponse\": request = RemoveFrameworkRequest()", "\"UnregisterVerifierResponse\": request = UnregisterVerifierRequest() if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert =", "str = betterproto.string_field(2) options: \"__common_v1__.RequestOptions\" = betterproto.message_field(5) @dataclass(eq=False, repr=False) class SearchRegistryResponse(betterproto.Message): items_json: str", "-> \"CheckVerifierStatusResponse\": request = CheckVerifierStatusRequest() request.governance_framework_uri = governance_framework_uri if did_uri: request.did_uri = did_uri", "betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") presentation_type_uri: str = betterproto.string_field(10) valid_from_utc: int", "@dataclass(eq=False, repr=False) class FetchDataRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) query: str = betterproto.string_field(2) @dataclass(eq=False,", "from betterproto.grpc.grpclib_server import ServiceBase import grpclib class RegistrationStatus(betterproto.Enum): CURRENT = 0 EXPIRED =", "str, valid_from_utc: int, valid_until_utc: int, governance_framework_uri: str, ) -> \"RegisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async", "continuation_token: str = betterproto.string_field(3) class TrustRegistryStub(betterproto.ServiceStub): async def add_framework( self, *, governance_framework: \"GovernanceFramework\"", "= betterproto.string_field(2, group=\"authority\") presentation_type_uri: str = betterproto.string_field(10) valid_from_utc: int = betterproto.uint64_field(11) valid_until_utc: int", "str = betterproto.string_field(10) governance_framework_uri: str = 
betterproto.string_field(20) @dataclass(eq=False, repr=False) class UnregisterIssuerResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\"", "str = \"\", x509_cert: str = \"\", credential_type_uri: str = \"\", valid_from_utc: int", "RemoveFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False, repr=False) class RemoveFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" = betterproto.enum_field(1)", "str = \"\", ) -> \"UnregisterIssuerResponse\": request = UnregisterIssuerRequest() if did_uri: request.did_uri =", "betterproto.enum_field(1) @dataclass(eq=False, repr=False) class CheckIssuerStatusRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) did_uri: str = betterproto.string_field(2,", "CheckVerifierStatusRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1) did_uri: str = betterproto.string_field(2, group=\"member\") x509_cert: str =", "CheckVerifierStatusResponse, ), \"/services.trustregistry.v1.TrustRegistry/FetchData\": grpclib.const.Handler( self.__rpc_fetch_data, grpclib.const.Cardinality.UNARY_STREAM, FetchDataRequest, FetchDataResponse, ), } from ...common import", "\"__common_v1__.ResponseStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class SearchRegistryRequest(betterproto.Message): query: str = betterproto.string_field(1) continuation_token: str", ") -> \"CheckVerifierStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def fetch_data( self, governance_framework_uri: str, query: str", "\"RemoveFrameworkResponse\": request = RemoveFrameworkRequest() if governance_framework is not None: request.governance_framework = governance_framework return", "= betterproto.string_field(1, group=\"authority\") x509_cert: str = betterproto.string_field(2, group=\"authority\") credential_type_uri: str = 
betterproto.string_field(10) governance_framework_uri:", "FetchDataResponse(betterproto.Message): response_json: str = betterproto.string_field(1) has_more_results: bool = betterproto.bool_field(2) continuation_token: str = betterproto.string_field(3)", "def register_verifier( self, *, did_uri: str = \"\", x509_cert: str = \"\", presentation_type_uri:", "request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.credential_type_uri = credential_type_uri return await", "self.unregister_verifier(**request_kwargs) await stream.send_message(response) async def __rpc_check_issuer_status(self, stream: grpclib.server.Stream) -> None: request = await", "\"GovernanceFramework\" ) -> \"RemoveFrameworkResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async def search_registry( self, query: str, continuation_token:", "class RegistrationStatus(betterproto.Enum): CURRENT = 0 EXPIRED = 1 TERMINATED = 2 REVOKED =", "str, x509_cert: str, presentation_type_uri: str, governance_framework_uri: str, ) -> \"UnregisterVerifierResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async", "= valid_until_utc request.governance_framework_uri = governance_framework_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\", request, RegisterVerifierResponse, ) async", "def register_issuer( self, did_uri: str, x509_cert: str, credential_type_uri: str, valid_from_utc: int, valid_until_utc: int,", "stream.recv_message() request_kwargs = { \"governance_framework_uri\": request.governance_framework_uri, \"query\": request.query, } await self._call_rpc_handler_server_stream( self.fetch_data, stream,", "*, governance_framework_uri: str = \"\", query: str = \"\" ) -> AsyncIterator[\"FetchDataResponse\"]: request", "options is not None: request.options = options return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/SearchRegistry\", request, SearchRegistryResponse,", 
"UnregisterIssuerRequest() if did_uri: request.did_uri = did_uri if x509_cert: request.x509_cert = x509_cert request.credential_type_uri =", "class RemoveFrameworkRequest(betterproto.Message): governance_framework: \"GovernanceFramework\" = betterproto.message_field(1) @dataclass(eq=False, repr=False) class RemoveFrameworkResponse(betterproto.Message): status: \"__common_v1__.ResponseStatus\" =", "did_uri: str, x509_cert: str, credential_type_uri: str, governance_framework_uri: str, ) -> \"UnregisterIssuerResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)", "\"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\": grpclib.const.Handler( self.__rpc_register_verifier, grpclib.const.Cardinality.UNARY_UNARY, RegisterVerifierRequest, RegisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\": grpclib.const.Handler( self.__rpc_unregister_issuer, grpclib.const.Cardinality.UNARY_UNARY, UnregisterIssuerRequest, UnregisterIssuerResponse,", "x509_cert: str = betterproto.string_field(2, group=\"authority\") credential_type_uri: str = betterproto.string_field(10) valid_from_utc: int = betterproto.uint64_field(11)", "protocol buffer compiler. DO NOT EDIT! 
# sources: services/trust-registry/v1/trust-registry.proto # plugin: python-betterproto from", "async def __rpc_remove_framework(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs =", "await self.check_issuer_status(**request_kwargs) await stream.send_message(response) async def __rpc_check_verifier_status(self, stream: grpclib.server.Stream) -> None: request =", "async def __rpc_register_issuer(self, stream: grpclib.server.Stream) -> None: request = await stream.recv_message() request_kwargs =", "not None: request.governance_framework = governance_framework return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/RemoveFramework\", request, RemoveFrameworkResponse, ) async", "async def unregister_verifier( self, *, did_uri: str = \"\", x509_cert: str = \"\",", "RegisterIssuerResponse, ), \"/services.trustregistry.v1.TrustRegistry/RegisterVerifier\": grpclib.const.Handler( self.__rpc_register_verifier, grpclib.const.Cardinality.UNARY_UNARY, RegisterVerifierRequest, RegisterVerifierResponse, ), \"/services.trustregistry.v1.TrustRegistry/UnregisterIssuer\": grpclib.const.Handler( self.__rpc_unregister_issuer, grpclib.const.Cardinality.UNARY_UNARY,", "def search_registry( self, query: str, continuation_token: str, options: \"__common_v1__.RequestOptions\", ) -> \"SearchRegistryResponse\": raise", "response = await self.check_verifier_status(**request_kwargs) await stream.send_message(response) async def __rpc_fetch_data(self, stream: grpclib.server.Stream) -> None:", "is not None: request.governance_framework = governance_framework return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/AddFramework\", request, AddFrameworkResponse, )", "= betterproto.string_field(1) did_uri: str = betterproto.string_field(2, group=\"member\") x509_cert: str = betterproto.string_field(3, group=\"member\") presentation_type_uri:", "str, did_uri: str, x509_cert: str, credential_type_uri: str, ) -> 
\"CheckIssuerStatusResponse\": raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED) async", "CheckIssuerStatusResponse(betterproto.Message): status: \"RegistrationStatus\" = betterproto.enum_field(1) @dataclass(eq=False, repr=False) class CheckVerifierStatusRequest(betterproto.Message): governance_framework_uri: str = betterproto.string_field(1)", "x509_cert: str = \"\", credential_type_uri: str = \"\", governance_framework_uri: str = \"\", )", "request.presentation_type_uri = presentation_type_uri return await self._unary_unary( \"/services.trustregistry.v1.TrustRegistry/CheckVerifierStatus\", request, CheckVerifierStatusResponse, ) async def fetch_data(", "def unregister_issuer( self, did_uri: str, x509_cert: str, credential_type_uri: str, governance_framework_uri: str, ) ->", "= betterproto.string_field(2, group=\"member\") x509_cert: str = betterproto.string_field(3, group=\"member\") credential_type_uri: str = betterproto.string_field(4) @dataclass(eq=False," ]
[]
[ "from metrics.wasserstein import wasserstein_2 def compute_metrics(ds, split, inv_temp, num_parties, num_classes, alpha, lengthscale, party_datasets,", "dkls_after = average_dkl(party_datasets_with_rewards, reference_dataset) print(dkls_after) print(\"Correlation coefficient with alpha: \\n{}\".format(np.corrcoef(alpha, dkls_after)[0, 1])) class_props", "os from pathlib import Path from metrics.class_imbalance import get_classes, class_proportion from metrics.phi_div import", "average_dkl(party_datasets_with_rewards, reference_dataset) print(dkls_after) print(\"Correlation coefficient with alpha: \\n{}\".format(np.corrcoef(alpha, dkls_after)[0, 1])) class_props = []", "for result in rewards: class_props.append( class_proportion(get_classes(np.array(result), candidate_datasets[0], candidate_labels), num_classes)) print(\"Class proportions and class", "\\n{}\".format(wass_before)) print(\"Wasserstein-2 after: \\n{}\".format(wass_after)) print(\"Correlation coefficient with alpha: \\n{}\".format(np.corrcoef(alpha, wass_after)[0, 1])) #Save metrics", "dkls_after)[0, 1])) class_props = [] for result in rewards: class_props.append( class_proportion(get_classes(np.array(result), candidate_datasets[0], candidate_labels),", "in rewards])) print(\"alpha:\\n{}\".format(alpha)) print(\"Calculating average DKLs before\") dkls_before = average_dkl(party_datasets, reference_dataset) print(dkls_before) print(\"Calculating", "i in range(num_parties)] wass_after = [wasserstein_2(np.concatenate([party_datasets[i], np.array(rewards[i])], axis=0), reference_dataset) for i in range(num_parties)]", "class_props = [] for result in rewards: class_props.append( class_proportion(get_classes(np.array(result), candidate_datasets[0], candidate_labels), num_classes)) print(\"Class", "= [wasserstein_2(np.concatenate([party_datasets[i], np.array(rewards[i])], axis=0), reference_dataset) for i in range(num_parties)] print(\"Wasserstein-2 before: \\n{}\".format(wass_before)) 
print(\"Wasserstein-2", "party_labels, reference_dataset, candidate_datasets, candidate_labels, rewards, deltas, mus, alpha, lengthscale, class_props, wass_before, wass_after, dkls_before,", "mus): print(\"Computing metrics\") party_datasets_with_rewards = [] for i in range(num_parties): party_datasets_with_rewards.append(np.concatenate([party_datasets[i], rewards[i]], axis=0))", "deltas, mus): print(\"Computing metrics\") party_datasets_with_rewards = [] for i in range(num_parties): party_datasets_with_rewards.append(np.concatenate([party_datasets[i], rewards[i]],", "average DKLs before\") dkls_before = average_dkl(party_datasets, reference_dataset) print(dkls_before) print(\"Calculating average DKLs after\") dkls_after", "'/data/metrics').mkdir(parents=True, exist_ok=True) pickle.dump((party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels, rewards, deltas, mus, alpha, lengthscale, class_props,", "print(\"Class proportions and class imbalance of rewards: {}\".format(class_props)) print(\"Calculating Wasserstein-2 before\") wass_before =", "rewards, deltas, mus): print(\"Computing metrics\") party_datasets_with_rewards = [] for i in range(num_parties): party_datasets_with_rewards.append(np.concatenate([party_datasets[i],", "candidate_datasets[0], candidate_labels), num_classes)) print(\"Class proportions and class imbalance of rewards: {}\".format(class_props)) print(\"Calculating Wasserstein-2", "reference_dataset, candidate_datasets, candidate_labels, rewards, deltas, mus, alpha, lengthscale, class_props, wass_before, wass_after, dkls_before, dkls_after),", "in range(num_parties)] print(\"Wasserstein-2 before: \\n{}\".format(wass_before)) print(\"Wasserstein-2 after: \\n{}\".format(wass_after)) print(\"Correlation coefficient with alpha: \\n{}\".format(np.corrcoef(alpha,", "[wasserstein_2(np.concatenate([party_datasets[i], np.array(rewards[i])], axis=0), reference_dataset) for i in range(num_parties)] print(\"Wasserstein-2 before: 
\\n{}\".format(wass_before)) print(\"Wasserstein-2 after:", "alpha: \\n{}\".format(np.corrcoef(alpha, dkls_after)[0, 1])) class_props = [] for result in rewards: class_props.append( class_proportion(get_classes(np.array(result),", "deltas, mus, alpha, lengthscale, class_props, wass_before, wass_after, dkls_before, dkls_after), open(\"data/metrics/metrics-{}-{}-{}.p\".format(ds, split, inv_temp), 'wb'))", "candidate_datasets, candidate_labels, rewards, deltas, mus): print(\"Computing metrics\") party_datasets_with_rewards = [] for i in", "= [] for result in rewards: class_props.append( class_proportion(get_classes(np.array(result), candidate_datasets[0], candidate_labels), num_classes)) print(\"Class proportions", "Wasserstein-2 before\") wass_before = [wasserstein_2(party_datasets[i], reference_dataset) for i in range(num_parties)] wass_after = [wasserstein_2(np.concatenate([party_datasets[i],", "metrics.wasserstein import wasserstein_2 def compute_metrics(ds, split, inv_temp, num_parties, num_classes, alpha, lengthscale, party_datasets, party_labels,", "pickle.dump((party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels, rewards, deltas, mus, alpha, lengthscale, class_props, wass_before, wass_after,", "before\") dkls_before = average_dkl(party_datasets, reference_dataset) print(dkls_before) print(\"Calculating average DKLs after\") dkls_after = average_dkl(party_datasets_with_rewards,", "candidate_labels, rewards, deltas, mus): print(\"Computing metrics\") party_datasets_with_rewards = [] for i in range(num_parties):", "for i in range(num_parties): party_datasets_with_rewards.append(np.concatenate([party_datasets[i], rewards[i]], axis=0)) print(\"Length of rewards: {}\".format([len(r) for r", "candidate_labels, rewards, deltas, mus, alpha, lengthscale, class_props, wass_before, wass_after, dkls_before, dkls_after), open(\"data/metrics/metrics-{}-{}-{}.p\".format(ds, split,", "reference_dataset, candidate_datasets, candidate_labels, 
rewards, deltas, mus): print(\"Computing metrics\") party_datasets_with_rewards = [] for i", "reference_dataset) for i in range(num_parties)] wass_after = [wasserstein_2(np.concatenate([party_datasets[i], np.array(rewards[i])], axis=0), reference_dataset) for i", "alpha: \\n{}\".format(np.corrcoef(alpha, wass_after)[0, 1])) #Save metrics Path(os.getcwd() + '/data/metrics').mkdir(parents=True, exist_ok=True) pickle.dump((party_datasets, party_labels, reference_dataset,", "get_classes, class_proportion from metrics.phi_div import average_dkl from metrics.wasserstein import wasserstein_2 def compute_metrics(ds, split,", "print(\"Wasserstein-2 after: \\n{}\".format(wass_after)) print(\"Correlation coefficient with alpha: \\n{}\".format(np.corrcoef(alpha, wass_after)[0, 1])) #Save metrics Path(os.getcwd()", "compute_metrics(ds, split, inv_temp, num_parties, num_classes, alpha, lengthscale, party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels, rewards,", "import numpy as np import pickle import os from pathlib import Path from", "from metrics.class_imbalance import get_classes, class_proportion from metrics.phi_div import average_dkl from metrics.wasserstein import wasserstein_2", "print(\"Correlation coefficient with alpha: \\n{}\".format(np.corrcoef(alpha, wass_after)[0, 1])) #Save metrics Path(os.getcwd() + '/data/metrics').mkdir(parents=True, exist_ok=True)", "def compute_metrics(ds, split, inv_temp, num_parties, num_classes, alpha, lengthscale, party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels,", "DKLs after\") dkls_after = average_dkl(party_datasets_with_rewards, reference_dataset) print(dkls_after) print(\"Correlation coefficient with alpha: \\n{}\".format(np.corrcoef(alpha, dkls_after)[0,", "coefficient with alpha: \\n{}\".format(np.corrcoef(alpha, wass_after)[0, 1])) #Save metrics Path(os.getcwd() + '/data/metrics').mkdir(parents=True, exist_ok=True) pickle.dump((party_datasets,", "as np import 
pickle import os from pathlib import Path from metrics.class_imbalance import", "print(dkls_before) print(\"Calculating average DKLs after\") dkls_after = average_dkl(party_datasets_with_rewards, reference_dataset) print(dkls_after) print(\"Correlation coefficient with", "for i in range(num_parties)] wass_after = [wasserstein_2(np.concatenate([party_datasets[i], np.array(rewards[i])], axis=0), reference_dataset) for i in", "party_datasets_with_rewards = [] for i in range(num_parties): party_datasets_with_rewards.append(np.concatenate([party_datasets[i], rewards[i]], axis=0)) print(\"Length of rewards:", "[] for result in rewards: class_props.append( class_proportion(get_classes(np.array(result), candidate_datasets[0], candidate_labels), num_classes)) print(\"Class proportions and", "metrics.phi_div import average_dkl from metrics.wasserstein import wasserstein_2 def compute_metrics(ds, split, inv_temp, num_parties, num_classes,", "lengthscale, party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels, rewards, deltas, mus): print(\"Computing metrics\") party_datasets_with_rewards =", "metrics\") party_datasets_with_rewards = [] for i in range(num_parties): party_datasets_with_rewards.append(np.concatenate([party_datasets[i], rewards[i]], axis=0)) print(\"Length of", "average_dkl(party_datasets, reference_dataset) print(dkls_before) print(\"Calculating average DKLs after\") dkls_after = average_dkl(party_datasets_with_rewards, reference_dataset) print(dkls_after) print(\"Correlation", "with alpha: \\n{}\".format(np.corrcoef(alpha, wass_after)[0, 1])) #Save metrics Path(os.getcwd() + '/data/metrics').mkdir(parents=True, exist_ok=True) pickle.dump((party_datasets, party_labels,", "print(\"Calculating average DKLs before\") dkls_before = average_dkl(party_datasets, reference_dataset) print(dkls_before) print(\"Calculating average DKLs after\")", "class_props.append( class_proportion(get_classes(np.array(result), candidate_datasets[0], 
candidate_labels), num_classes)) print(\"Class proportions and class imbalance of rewards: {}\".format(class_props))", "Path(os.getcwd() + '/data/metrics').mkdir(parents=True, exist_ok=True) pickle.dump((party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels, rewards, deltas, mus, alpha,", "wass_before = [wasserstein_2(party_datasets[i], reference_dataset) for i in range(num_parties)] wass_after = [wasserstein_2(np.concatenate([party_datasets[i], np.array(rewards[i])], axis=0),", "axis=0)) print(\"Length of rewards: {}\".format([len(r) for r in rewards])) print(\"alpha:\\n{}\".format(alpha)) print(\"Calculating average DKLs", "rewards, deltas, mus, alpha, lengthscale, class_props, wass_before, wass_after, dkls_before, dkls_after), open(\"data/metrics/metrics-{}-{}-{}.p\".format(ds, split, inv_temp),", "= [] for i in range(num_parties): party_datasets_with_rewards.append(np.concatenate([party_datasets[i], rewards[i]], axis=0)) print(\"Length of rewards: {}\".format([len(r)", "print(\"alpha:\\n{}\".format(alpha)) print(\"Calculating average DKLs before\") dkls_before = average_dkl(party_datasets, reference_dataset) print(dkls_before) print(\"Calculating average DKLs", "from metrics.phi_div import average_dkl from metrics.wasserstein import wasserstein_2 def compute_metrics(ds, split, inv_temp, num_parties,", "[] for i in range(num_parties): party_datasets_with_rewards.append(np.concatenate([party_datasets[i], rewards[i]], axis=0)) print(\"Length of rewards: {}\".format([len(r) for", "print(\"Calculating average DKLs after\") dkls_after = average_dkl(party_datasets_with_rewards, reference_dataset) print(dkls_after) print(\"Correlation coefficient with alpha:", "result in rewards: class_props.append( class_proportion(get_classes(np.array(result), candidate_datasets[0], candidate_labels), num_classes)) print(\"Class proportions and class imbalance", "{}\".format(class_props)) print(\"Calculating Wasserstein-2 before\") wass_before = 
[wasserstein_2(party_datasets[i], reference_dataset) for i in range(num_parties)] wass_after", "party_datasets_with_rewards.append(np.concatenate([party_datasets[i], rewards[i]], axis=0)) print(\"Length of rewards: {}\".format([len(r) for r in rewards])) print(\"alpha:\\n{}\".format(alpha)) print(\"Calculating", "numpy as np import pickle import os from pathlib import Path from metrics.class_imbalance", "range(num_parties)] print(\"Wasserstein-2 before: \\n{}\".format(wass_before)) print(\"Wasserstein-2 after: \\n{}\".format(wass_after)) print(\"Correlation coefficient with alpha: \\n{}\".format(np.corrcoef(alpha, wass_after)[0,", "rewards: class_props.append( class_proportion(get_classes(np.array(result), candidate_datasets[0], candidate_labels), num_classes)) print(\"Class proportions and class imbalance of rewards:", "pickle import os from pathlib import Path from metrics.class_imbalance import get_classes, class_proportion from", "class_proportion from metrics.phi_div import average_dkl from metrics.wasserstein import wasserstein_2 def compute_metrics(ds, split, inv_temp,", "of rewards: {}\".format(class_props)) print(\"Calculating Wasserstein-2 before\") wass_before = [wasserstein_2(party_datasets[i], reference_dataset) for i in", "print(\"Length of rewards: {}\".format([len(r) for r in rewards])) print(\"alpha:\\n{}\".format(alpha)) print(\"Calculating average DKLs before\")", "and class imbalance of rewards: {}\".format(class_props)) print(\"Calculating Wasserstein-2 before\") wass_before = [wasserstein_2(party_datasets[i], reference_dataset)", "i in range(num_parties): party_datasets_with_rewards.append(np.concatenate([party_datasets[i], rewards[i]], axis=0)) print(\"Length of rewards: {}\".format([len(r) for r in", "rewards])) print(\"alpha:\\n{}\".format(alpha)) print(\"Calculating average DKLs before\") dkls_before = average_dkl(party_datasets, reference_dataset) print(dkls_before) print(\"Calculating average", "np.array(rewards[i])], axis=0), 
reference_dataset) for i in range(num_parties)] print(\"Wasserstein-2 before: \\n{}\".format(wass_before)) print(\"Wasserstein-2 after: \\n{}\".format(wass_after))", "r in rewards])) print(\"alpha:\\n{}\".format(alpha)) print(\"Calculating average DKLs before\") dkls_before = average_dkl(party_datasets, reference_dataset) print(dkls_before)", "i in range(num_parties)] print(\"Wasserstein-2 before: \\n{}\".format(wass_before)) print(\"Wasserstein-2 after: \\n{}\".format(wass_after)) print(\"Correlation coefficient with alpha:", "1])) class_props = [] for result in rewards: class_props.append( class_proportion(get_classes(np.array(result), candidate_datasets[0], candidate_labels), num_classes))", "\\n{}\".format(np.corrcoef(alpha, wass_after)[0, 1])) #Save metrics Path(os.getcwd() + '/data/metrics').mkdir(parents=True, exist_ok=True) pickle.dump((party_datasets, party_labels, reference_dataset, candidate_datasets,", "for i in range(num_parties)] print(\"Wasserstein-2 before: \\n{}\".format(wass_before)) print(\"Wasserstein-2 after: \\n{}\".format(wass_after)) print(\"Correlation coefficient with", "import Path from metrics.class_imbalance import get_classes, class_proportion from metrics.phi_div import average_dkl from metrics.wasserstein", "party_labels, reference_dataset, candidate_datasets, candidate_labels, rewards, deltas, mus): print(\"Computing metrics\") party_datasets_with_rewards = [] for", "for r in rewards])) print(\"alpha:\\n{}\".format(alpha)) print(\"Calculating average DKLs before\") dkls_before = average_dkl(party_datasets, reference_dataset)", "print(\"Calculating Wasserstein-2 before\") wass_before = [wasserstein_2(party_datasets[i], reference_dataset) for i in range(num_parties)] wass_after =", "alpha, lengthscale, party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels, rewards, deltas, mus): print(\"Computing metrics\") party_datasets_with_rewards", "#Save metrics Path(os.getcwd() + 
'/data/metrics').mkdir(parents=True, exist_ok=True) pickle.dump((party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels, rewards, deltas,", "reference_dataset) for i in range(num_parties)] print(\"Wasserstein-2 before: \\n{}\".format(wass_before)) print(\"Wasserstein-2 after: \\n{}\".format(wass_after)) print(\"Correlation coefficient", "\\n{}\".format(wass_after)) print(\"Correlation coefficient with alpha: \\n{}\".format(np.corrcoef(alpha, wass_after)[0, 1])) #Save metrics Path(os.getcwd() + '/data/metrics').mkdir(parents=True,", "in range(num_parties): party_datasets_with_rewards.append(np.concatenate([party_datasets[i], rewards[i]], axis=0)) print(\"Length of rewards: {}\".format([len(r) for r in rewards]))", "pathlib import Path from metrics.class_imbalance import get_classes, class_proportion from metrics.phi_div import average_dkl from", "axis=0), reference_dataset) for i in range(num_parties)] print(\"Wasserstein-2 before: \\n{}\".format(wass_before)) print(\"Wasserstein-2 after: \\n{}\".format(wass_after)) print(\"Correlation", "print(\"Computing metrics\") party_datasets_with_rewards = [] for i in range(num_parties): party_datasets_with_rewards.append(np.concatenate([party_datasets[i], rewards[i]], axis=0)) print(\"Length", "class imbalance of rewards: {}\".format(class_props)) print(\"Calculating Wasserstein-2 before\") wass_before = [wasserstein_2(party_datasets[i], reference_dataset) for", "print(dkls_after) print(\"Correlation coefficient with alpha: \\n{}\".format(np.corrcoef(alpha, dkls_after)[0, 1])) class_props = [] for result", "wass_after = [wasserstein_2(np.concatenate([party_datasets[i], np.array(rewards[i])], axis=0), reference_dataset) for i in range(num_parties)] print(\"Wasserstein-2 before: \\n{}\".format(wass_before))", "dkls_before = average_dkl(party_datasets, reference_dataset) print(dkls_before) print(\"Calculating average DKLs after\") dkls_after = average_dkl(party_datasets_with_rewards, 
reference_dataset)", "candidate_labels), num_classes)) print(\"Class proportions and class imbalance of rewards: {}\".format(class_props)) print(\"Calculating Wasserstein-2 before\")", "before: \\n{}\".format(wass_before)) print(\"Wasserstein-2 after: \\n{}\".format(wass_after)) print(\"Correlation coefficient with alpha: \\n{}\".format(np.corrcoef(alpha, wass_after)[0, 1])) #Save", "num_parties, num_classes, alpha, lengthscale, party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels, rewards, deltas, mus): print(\"Computing", "rewards: {}\".format([len(r) for r in rewards])) print(\"alpha:\\n{}\".format(alpha)) print(\"Calculating average DKLs before\") dkls_before =", "class_proportion(get_classes(np.array(result), candidate_datasets[0], candidate_labels), num_classes)) print(\"Class proportions and class imbalance of rewards: {}\".format(class_props)) print(\"Calculating", "wass_after)[0, 1])) #Save metrics Path(os.getcwd() + '/data/metrics').mkdir(parents=True, exist_ok=True) pickle.dump((party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels,", "party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels, rewards, deltas, mus): print(\"Computing metrics\") party_datasets_with_rewards = []", "range(num_parties)] wass_after = [wasserstein_2(np.concatenate([party_datasets[i], np.array(rewards[i])], axis=0), reference_dataset) for i in range(num_parties)] print(\"Wasserstein-2 before:", "1])) #Save metrics Path(os.getcwd() + '/data/metrics').mkdir(parents=True, exist_ok=True) pickle.dump((party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels, rewards,", "DKLs before\") dkls_before = average_dkl(party_datasets, reference_dataset) print(dkls_before) print(\"Calculating average DKLs after\") dkls_after =", "import pickle import os from pathlib import Path from metrics.class_imbalance import get_classes, class_proportion", "reference_dataset) 
print(dkls_before) print(\"Calculating average DKLs after\") dkls_after = average_dkl(party_datasets_with_rewards, reference_dataset) print(dkls_after) print(\"Correlation coefficient", "\\n{}\".format(np.corrcoef(alpha, dkls_after)[0, 1])) class_props = [] for result in rewards: class_props.append( class_proportion(get_classes(np.array(result), candidate_datasets[0],", "imbalance of rewards: {}\".format(class_props)) print(\"Calculating Wasserstein-2 before\") wass_before = [wasserstein_2(party_datasets[i], reference_dataset) for i", "= [wasserstein_2(party_datasets[i], reference_dataset) for i in range(num_parties)] wass_after = [wasserstein_2(np.concatenate([party_datasets[i], np.array(rewards[i])], axis=0), reference_dataset)", "= average_dkl(party_datasets_with_rewards, reference_dataset) print(dkls_after) print(\"Correlation coefficient with alpha: \\n{}\".format(np.corrcoef(alpha, dkls_after)[0, 1])) class_props =", "inv_temp, num_parties, num_classes, alpha, lengthscale, party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels, rewards, deltas, mus):", "= average_dkl(party_datasets, reference_dataset) print(dkls_before) print(\"Calculating average DKLs after\") dkls_after = average_dkl(party_datasets_with_rewards, reference_dataset) print(dkls_after)", "in rewards: class_props.append( class_proportion(get_classes(np.array(result), candidate_datasets[0], candidate_labels), num_classes)) print(\"Class proportions and class imbalance of", "exist_ok=True) pickle.dump((party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels, rewards, deltas, mus, alpha, lengthscale, class_props, wass_before,", "in range(num_parties)] wass_after = [wasserstein_2(np.concatenate([party_datasets[i], np.array(rewards[i])], axis=0), reference_dataset) for i in range(num_parties)] print(\"Wasserstein-2", "coefficient with alpha: \\n{}\".format(np.corrcoef(alpha, dkls_after)[0, 1])) class_props = [] for result in rewards:", 
"candidate_datasets, candidate_labels, rewards, deltas, mus, alpha, lengthscale, class_props, wass_before, wass_after, dkls_before, dkls_after), open(\"data/metrics/metrics-{}-{}-{}.p\".format(ds,", "reference_dataset) print(dkls_after) print(\"Correlation coefficient with alpha: \\n{}\".format(np.corrcoef(alpha, dkls_after)[0, 1])) class_props = [] for", "Path from metrics.class_imbalance import get_classes, class_proportion from metrics.phi_div import average_dkl from metrics.wasserstein import", "from pathlib import Path from metrics.class_imbalance import get_classes, class_proportion from metrics.phi_div import average_dkl", "rewards[i]], axis=0)) print(\"Length of rewards: {}\".format([len(r) for r in rewards])) print(\"alpha:\\n{}\".format(alpha)) print(\"Calculating average", "rewards: {}\".format(class_props)) print(\"Calculating Wasserstein-2 before\") wass_before = [wasserstein_2(party_datasets[i], reference_dataset) for i in range(num_parties)]", "print(\"Wasserstein-2 before: \\n{}\".format(wass_before)) print(\"Wasserstein-2 after: \\n{}\".format(wass_after)) print(\"Correlation coefficient with alpha: \\n{}\".format(np.corrcoef(alpha, wass_after)[0, 1]))", "of rewards: {}\".format([len(r) for r in rewards])) print(\"alpha:\\n{}\".format(alpha)) print(\"Calculating average DKLs before\") dkls_before", "import average_dkl from metrics.wasserstein import wasserstein_2 def compute_metrics(ds, split, inv_temp, num_parties, num_classes, alpha,", "range(num_parties): party_datasets_with_rewards.append(np.concatenate([party_datasets[i], rewards[i]], axis=0)) print(\"Length of rewards: {}\".format([len(r) for r in rewards])) print(\"alpha:\\n{}\".format(alpha))", "{}\".format([len(r) for r in rewards])) print(\"alpha:\\n{}\".format(alpha)) print(\"Calculating average DKLs before\") dkls_before = average_dkl(party_datasets,", "after: \\n{}\".format(wass_after)) print(\"Correlation coefficient with alpha: \\n{}\".format(np.corrcoef(alpha, wass_after)[0, 1])) 
#Save metrics Path(os.getcwd() +", "import get_classes, class_proportion from metrics.phi_div import average_dkl from metrics.wasserstein import wasserstein_2 def compute_metrics(ds,", "num_classes, alpha, lengthscale, party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels, rewards, deltas, mus): print(\"Computing metrics\")", "[wasserstein_2(party_datasets[i], reference_dataset) for i in range(num_parties)] wass_after = [wasserstein_2(np.concatenate([party_datasets[i], np.array(rewards[i])], axis=0), reference_dataset) for", "metrics Path(os.getcwd() + '/data/metrics').mkdir(parents=True, exist_ok=True) pickle.dump((party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels, rewards, deltas, mus,", "print(\"Correlation coefficient with alpha: \\n{}\".format(np.corrcoef(alpha, dkls_after)[0, 1])) class_props = [] for result in", "proportions and class imbalance of rewards: {}\".format(class_props)) print(\"Calculating Wasserstein-2 before\") wass_before = [wasserstein_2(party_datasets[i],", "average_dkl from metrics.wasserstein import wasserstein_2 def compute_metrics(ds, split, inv_temp, num_parties, num_classes, alpha, lengthscale,", "metrics.class_imbalance import get_classes, class_proportion from metrics.phi_div import average_dkl from metrics.wasserstein import wasserstein_2 def", "import wasserstein_2 def compute_metrics(ds, split, inv_temp, num_parties, num_classes, alpha, lengthscale, party_datasets, party_labels, reference_dataset,", "split, inv_temp, num_parties, num_classes, alpha, lengthscale, party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels, rewards, deltas,", "np import pickle import os from pathlib import Path from metrics.class_imbalance import get_classes,", "average DKLs after\") dkls_after = average_dkl(party_datasets_with_rewards, reference_dataset) print(dkls_after) print(\"Correlation coefficient with alpha: \\n{}\".format(np.corrcoef(alpha,", 
"before\") wass_before = [wasserstein_2(party_datasets[i], reference_dataset) for i in range(num_parties)] wass_after = [wasserstein_2(np.concatenate([party_datasets[i], np.array(rewards[i])],", "+ '/data/metrics').mkdir(parents=True, exist_ok=True) pickle.dump((party_datasets, party_labels, reference_dataset, candidate_datasets, candidate_labels, rewards, deltas, mus, alpha, lengthscale,", "import os from pathlib import Path from metrics.class_imbalance import get_classes, class_proportion from metrics.phi_div", "num_classes)) print(\"Class proportions and class imbalance of rewards: {}\".format(class_props)) print(\"Calculating Wasserstein-2 before\") wass_before", "with alpha: \\n{}\".format(np.corrcoef(alpha, dkls_after)[0, 1])) class_props = [] for result in rewards: class_props.append(", "after\") dkls_after = average_dkl(party_datasets_with_rewards, reference_dataset) print(dkls_after) print(\"Correlation coefficient with alpha: \\n{}\".format(np.corrcoef(alpha, dkls_after)[0, 1]))", "wasserstein_2 def compute_metrics(ds, split, inv_temp, num_parties, num_classes, alpha, lengthscale, party_datasets, party_labels, reference_dataset, candidate_datasets," ]
[ "high influence on the reconstruction error. Transition that have a high gradient on", "should be changed further and therefore given a low reward. \"\"\" batch_dim =", ") action_recon = mode_latent_model.reconstruct_action( features_seq=features_seq, mode_sample=posterior['samples'] ) ll = action_recon['dists'].log_prob(action_seq).mean(dim=0).sum() mse = F.mse_loss(action_recon['samples'],", "posterior, features_seq = mode_latent_model.sample_mode_posterior_with_features( obs_seq=obs_seq ) action_recon = mode_latent_model.reconstruct_action( features_seq=features_seq, mode_sample=posterior['samples'] ) ll", "on ll should be changed further and therefore given a low reward. \"\"\"", "\"\"\" Args: mode_latent_model : latent variable model obs_seq : (N, S, obs_dim) tensor", "-2 data_dim = -1 batch_size = obs_seq.size(batch_dim) seq_len = obs_seq.size(seq_dim) obs_seq.requires_grad = True", "mode_latent_model : latent variable model obs_seq : (N, S, obs_dim) tensor action_seq :", "ll = action_recon['dists'].log_prob(action_seq).mean(dim=0).sum() mse = F.mse_loss(action_recon['samples'], action_seq) ll.backward() gradients_per_transition = obs_seq.grad.sum(dim=data_dim, keepdim=True) assert", "action_seq : (N, S, action_dim) tensor skill_seq : (N, S, skill_dim) tensor Return:", "action_recon = mode_latent_model.reconstruct_action( features_seq=features_seq, mode_sample=posterior['samples'] ) ll = action_recon['dists'].log_prob(action_seq).mean(dim=0).sum() mse = F.mse_loss(action_recon['samples'], action_seq)", "tensor Return: Loss : tensor Overall goal for the SAC-Agent is a high", "= mode_latent_model.reconstruct_action( features_seq=features_seq, mode_sample=posterior['samples'] ) ll = action_recon['dists'].log_prob(action_seq).mean(dim=0).sum() mse = F.mse_loss(action_recon['samples'], action_seq) ll.backward()", "high gradient on ll should be changed further and therefore given a low", "Return: Loss : tensor Overall goal for the SAC-Agent is a high 
reconstruction", "error. Transition that have a high gradient on ll should be changed further", "= mode_latent_model.sample_mode_posterior_with_features( obs_seq=obs_seq ) action_recon = mode_latent_model.reconstruct_action( features_seq=features_seq, mode_sample=posterior['samples'] ) ll = action_recon['dists'].log_prob(action_seq).mean(dim=0).sum()", "skill_seq : (N, S, skill_dim) tensor Return: Loss : tensor Overall goal for", "(N, S, skill_dim) tensor Return: Loss : tensor Overall goal for the SAC-Agent", "mode_sample=posterior['samples'] ) ll = action_recon['dists'].log_prob(action_seq).mean(dim=0).sum() mse = F.mse_loss(action_recon['samples'], action_seq) ll.backward() gradients_per_transition = obs_seq.grad.sum(dim=data_dim,", "goal for the SAC-Agent is a high reconstruction error for the mode latent", "-1 batch_size = obs_seq.size(batch_dim) seq_len = obs_seq.size(seq_dim) obs_seq.requires_grad = True posterior, features_seq =", "is a high reconstruction error for the mode latent model. Hence ll should", "model obs_seq : (N, S, obs_dim) tensor action_seq : (N, S, action_dim) tensor", "the SAC-Agent is a high reconstruction error for the mode latent model. Hence", "reconstruction error. Transition that have a high gradient on ll should be changed", "further and therefore given a low reward. \"\"\" batch_dim = 0 seq_dim =", "batch_dim = 0 seq_dim = -2 data_dim = -1 batch_size = obs_seq.size(batch_dim) seq_len", "torch.Tensor, action_seq: torch.Tensor, skill_seq: torch.Tensor, )->torch.Tensor: \"\"\" Args: mode_latent_model : latent variable model", "for the SAC-Agent is a high reconstruction error for the mode latent model.", "have high influence on the reconstruction error. Transition that have a high gradient", "F.mse_loss(action_recon['samples'], action_seq) ll.backward() gradients_per_transition = obs_seq.grad.sum(dim=data_dim, keepdim=True) assert gradients_per_transition.shape == torch.Size((batch_size, seq_len, 1))", "therefore given a low reward. 
\"\"\" batch_dim = 0 seq_dim = -2 data_dim", "features_seq = mode_latent_model.sample_mode_posterior_with_features( obs_seq=obs_seq ) action_recon = mode_latent_model.reconstruct_action( features_seq=features_seq, mode_sample=posterior['samples'] ) ll =", "mode_latent_model.reconstruct_action( features_seq=features_seq, mode_sample=posterior['samples'] ) ll = action_recon['dists'].log_prob(action_seq).mean(dim=0).sum() mse = F.mse_loss(action_recon['samples'], action_seq) ll.backward() gradients_per_transition", "obs_seq.size(seq_dim) obs_seq.requires_grad = True posterior, features_seq = mode_latent_model.sample_mode_posterior_with_features( obs_seq=obs_seq ) action_recon = mode_latent_model.reconstruct_action(", "data_dim = -1 batch_size = obs_seq.size(batch_dim) seq_len = obs_seq.size(seq_dim) obs_seq.requires_grad = True posterior,", "the reconstruction error. Transition that have a high gradient on ll should be", "be changed further and therefore given a low reward. \"\"\" batch_dim = 0", "self_supervised.network.mode_latent_model import ModeLatentNetworkWithEncoder import rlkit.torch.pytorch_util as ptu def reconstruction_based_rewards( mode_latent_model: ModeLatentNetworkWithEncoder, obs_seq: torch.Tensor,", "from self_supervised.network.mode_latent_model import ModeLatentNetworkWithEncoder import rlkit.torch.pytorch_util as ptu def reconstruction_based_rewards( mode_latent_model: ModeLatentNetworkWithEncoder, obs_seq:", "as F from self_supervised.network.mode_latent_model import ModeLatentNetworkWithEncoder import rlkit.torch.pytorch_util as ptu def reconstruction_based_rewards( mode_latent_model:", "features_seq=features_seq, mode_sample=posterior['samples'] ) ll = action_recon['dists'].log_prob(action_seq).mean(dim=0).sum() mse = F.mse_loss(action_recon['samples'], action_seq) ll.backward() gradients_per_transition =", "def reconstruction_based_rewards( mode_latent_model: ModeLatentNetworkWithEncoder, obs_seq: torch.Tensor, action_seq: torch.Tensor, 
skill_seq: torch.Tensor, )->torch.Tensor: \"\"\" Args:", "torch.Tensor, skill_seq: torch.Tensor, )->torch.Tensor: \"\"\" Args: mode_latent_model : latent variable model obs_seq :", "mode latent model. Hence ll should be low. Changing transitions, that have a", "for the mode latent model. Hence ll should be low. Changing transitions, that", "gradient, should have high influence on the reconstruction error. Transition that have a", "= -2 data_dim = -1 batch_size = obs_seq.size(batch_dim) seq_len = obs_seq.size(seq_dim) obs_seq.requires_grad =", "torch import torch.nn.functional as F from self_supervised.network.mode_latent_model import ModeLatentNetworkWithEncoder import rlkit.torch.pytorch_util as ptu", "have a high gradient, should have high influence on the reconstruction error. Transition", "be low. Changing transitions, that have a high gradient, should have high influence", "low reward. \"\"\" batch_dim = 0 seq_dim = -2 data_dim = -1 batch_size", "= F.mse_loss(action_recon['samples'], action_seq) ll.backward() gradients_per_transition = obs_seq.grad.sum(dim=data_dim, keepdim=True) assert gradients_per_transition.shape == torch.Size((batch_size, seq_len,", "obs_seq: torch.Tensor, action_seq: torch.Tensor, skill_seq: torch.Tensor, )->torch.Tensor: \"\"\" Args: mode_latent_model : latent variable", "Loss : tensor Overall goal for the SAC-Agent is a high reconstruction error", "influence on the reconstruction error. 
Transition that have a high gradient on ll", "True posterior, features_seq = mode_latent_model.sample_mode_posterior_with_features( obs_seq=obs_seq ) action_recon = mode_latent_model.reconstruct_action( features_seq=features_seq, mode_sample=posterior['samples'] )", "ModeLatentNetworkWithEncoder import rlkit.torch.pytorch_util as ptu def reconstruction_based_rewards( mode_latent_model: ModeLatentNetworkWithEncoder, obs_seq: torch.Tensor, action_seq: torch.Tensor,", "import torch.nn.functional as F from self_supervised.network.mode_latent_model import ModeLatentNetworkWithEncoder import rlkit.torch.pytorch_util as ptu def", "import ModeLatentNetworkWithEncoder import rlkit.torch.pytorch_util as ptu def reconstruction_based_rewards( mode_latent_model: ModeLatentNetworkWithEncoder, obs_seq: torch.Tensor, action_seq:", "action_seq: torch.Tensor, skill_seq: torch.Tensor, )->torch.Tensor: \"\"\" Args: mode_latent_model : latent variable model obs_seq", "obs_seq.size(batch_dim) seq_len = obs_seq.size(seq_dim) obs_seq.requires_grad = True posterior, features_seq = mode_latent_model.sample_mode_posterior_with_features( obs_seq=obs_seq )", "ModeLatentNetworkWithEncoder, obs_seq: torch.Tensor, action_seq: torch.Tensor, skill_seq: torch.Tensor, )->torch.Tensor: \"\"\" Args: mode_latent_model : latent", "0 seq_dim = -2 data_dim = -1 batch_size = obs_seq.size(batch_dim) seq_len = obs_seq.size(seq_dim)", "(N, S, action_dim) tensor skill_seq : (N, S, skill_dim) tensor Return: Loss :", ": (N, S, skill_dim) tensor Return: Loss : tensor Overall goal for the", "S, skill_dim) tensor Return: Loss : tensor Overall goal for the SAC-Agent is", ": (N, S, obs_dim) tensor action_seq : (N, S, action_dim) tensor skill_seq :", "Transition that have a high gradient on ll should be changed further and", "obs_seq.requires_grad = True posterior, features_seq = mode_latent_model.sample_mode_posterior_with_features( obs_seq=obs_seq ) action_recon = mode_latent_model.reconstruct_action( 
features_seq=features_seq,", "as ptu def reconstruction_based_rewards( mode_latent_model: ModeLatentNetworkWithEncoder, obs_seq: torch.Tensor, action_seq: torch.Tensor, skill_seq: torch.Tensor, )->torch.Tensor:", "variable model obs_seq : (N, S, obs_dim) tensor action_seq : (N, S, action_dim)", "batch_size = obs_seq.size(batch_dim) seq_len = obs_seq.size(seq_dim) obs_seq.requires_grad = True posterior, features_seq = mode_latent_model.sample_mode_posterior_with_features(", "skill_dim) tensor Return: Loss : tensor Overall goal for the SAC-Agent is a", ": (N, S, action_dim) tensor skill_seq : (N, S, skill_dim) tensor Return: Loss", ") ll = action_recon['dists'].log_prob(action_seq).mean(dim=0).sum() mse = F.mse_loss(action_recon['samples'], action_seq) ll.backward() gradients_per_transition = obs_seq.grad.sum(dim=data_dim, keepdim=True)", "obs_dim) tensor action_seq : (N, S, action_dim) tensor skill_seq : (N, S, skill_dim)", "ll should be changed further and therefore given a low reward. \"\"\" batch_dim", "torch.nn.functional as F from self_supervised.network.mode_latent_model import ModeLatentNetworkWithEncoder import rlkit.torch.pytorch_util as ptu def reconstruction_based_rewards(", "<filename>self_supervised/loss/loss_intrin_selfsup.py import torch import torch.nn.functional as F from self_supervised.network.mode_latent_model import ModeLatentNetworkWithEncoder import rlkit.torch.pytorch_util", "a high gradient, should have high influence on the reconstruction error. Transition that", "error for the mode latent model. Hence ll should be low. 
Changing transitions,", "have a high gradient on ll should be changed further and therefore given", "a high gradient on ll should be changed further and therefore given a", "that have a high gradient, should have high influence on the reconstruction error.", "mode_latent_model: ModeLatentNetworkWithEncoder, obs_seq: torch.Tensor, action_seq: torch.Tensor, skill_seq: torch.Tensor, )->torch.Tensor: \"\"\" Args: mode_latent_model :", "transitions, that have a high gradient, should have high influence on the reconstruction", "= -1 batch_size = obs_seq.size(batch_dim) seq_len = obs_seq.size(seq_dim) obs_seq.requires_grad = True posterior, features_seq", "changed further and therefore given a low reward. \"\"\" batch_dim = 0 seq_dim", "ll.backward() gradients_per_transition = obs_seq.grad.sum(dim=data_dim, keepdim=True) assert gradients_per_transition.shape == torch.Size((batch_size, seq_len, 1)) return -torch.abs(gradients_per_transition)", "import rlkit.torch.pytorch_util as ptu def reconstruction_based_rewards( mode_latent_model: ModeLatentNetworkWithEncoder, obs_seq: torch.Tensor, action_seq: torch.Tensor, skill_seq:", "Changing transitions, that have a high gradient, should have high influence on the", "seq_len = obs_seq.size(seq_dim) obs_seq.requires_grad = True posterior, features_seq = mode_latent_model.sample_mode_posterior_with_features( obs_seq=obs_seq ) action_recon", "latent variable model obs_seq : (N, S, obs_dim) tensor action_seq : (N, S,", "high gradient, should have high influence on the reconstruction error. 
Transition that have", "F from self_supervised.network.mode_latent_model import ModeLatentNetworkWithEncoder import rlkit.torch.pytorch_util as ptu def reconstruction_based_rewards( mode_latent_model: ModeLatentNetworkWithEncoder,", "tensor action_seq : (N, S, action_dim) tensor skill_seq : (N, S, skill_dim) tensor", "seq_dim = -2 data_dim = -1 batch_size = obs_seq.size(batch_dim) seq_len = obs_seq.size(seq_dim) obs_seq.requires_grad", "SAC-Agent is a high reconstruction error for the mode latent model. Hence ll", "Args: mode_latent_model : latent variable model obs_seq : (N, S, obs_dim) tensor action_seq", "given a low reward. \"\"\" batch_dim = 0 seq_dim = -2 data_dim =", "mode_latent_model.sample_mode_posterior_with_features( obs_seq=obs_seq ) action_recon = mode_latent_model.reconstruct_action( features_seq=features_seq, mode_sample=posterior['samples'] ) ll = action_recon['dists'].log_prob(action_seq).mean(dim=0).sum() mse", "obs_seq : (N, S, obs_dim) tensor action_seq : (N, S, action_dim) tensor skill_seq", ")->torch.Tensor: \"\"\" Args: mode_latent_model : latent variable model obs_seq : (N, S, obs_dim)", "= action_recon['dists'].log_prob(action_seq).mean(dim=0).sum() mse = F.mse_loss(action_recon['samples'], action_seq) ll.backward() gradients_per_transition = obs_seq.grad.sum(dim=data_dim, keepdim=True) assert gradients_per_transition.shape", "= obs_seq.size(batch_dim) seq_len = obs_seq.size(seq_dim) obs_seq.requires_grad = True posterior, features_seq = mode_latent_model.sample_mode_posterior_with_features( obs_seq=obs_seq", "import torch import torch.nn.functional as F from self_supervised.network.mode_latent_model import ModeLatentNetworkWithEncoder import rlkit.torch.pytorch_util as", "tensor skill_seq : (N, S, skill_dim) tensor Return: Loss : tensor Overall goal", "that have a high gradient on ll should be changed further and therefore", "action_seq) ll.backward() gradients_per_transition = obs_seq.grad.sum(dim=data_dim, keepdim=True) assert 
gradients_per_transition.shape == torch.Size((batch_size, seq_len, 1)) return", "latent model. Hence ll should be low. Changing transitions, that have a high", "rlkit.torch.pytorch_util as ptu def reconstruction_based_rewards( mode_latent_model: ModeLatentNetworkWithEncoder, obs_seq: torch.Tensor, action_seq: torch.Tensor, skill_seq: torch.Tensor,", "action_dim) tensor skill_seq : (N, S, skill_dim) tensor Return: Loss : tensor Overall", "should be low. Changing transitions, that have a high gradient, should have high", "tensor Overall goal for the SAC-Agent is a high reconstruction error for the", ": tensor Overall goal for the SAC-Agent is a high reconstruction error for", "reconstruction error for the mode latent model. Hence ll should be low. Changing", "and therefore given a low reward. \"\"\" batch_dim = 0 seq_dim = -2", "S, obs_dim) tensor action_seq : (N, S, action_dim) tensor skill_seq : (N, S,", "model. Hence ll should be low. Changing transitions, that have a high gradient,", "= obs_seq.size(seq_dim) obs_seq.requires_grad = True posterior, features_seq = mode_latent_model.sample_mode_posterior_with_features( obs_seq=obs_seq ) action_recon =", ": latent variable model obs_seq : (N, S, obs_dim) tensor action_seq : (N,", "on the reconstruction error. Transition that have a high gradient on ll should", "S, action_dim) tensor skill_seq : (N, S, skill_dim) tensor Return: Loss : tensor", "a low reward. 
\"\"\" batch_dim = 0 seq_dim = -2 data_dim = -1", "ptu def reconstruction_based_rewards( mode_latent_model: ModeLatentNetworkWithEncoder, obs_seq: torch.Tensor, action_seq: torch.Tensor, skill_seq: torch.Tensor, )->torch.Tensor: \"\"\"", "action_recon['dists'].log_prob(action_seq).mean(dim=0).sum() mse = F.mse_loss(action_recon['samples'], action_seq) ll.backward() gradients_per_transition = obs_seq.grad.sum(dim=data_dim, keepdim=True) assert gradients_per_transition.shape ==", "gradient on ll should be changed further and therefore given a low reward.", "a high reconstruction error for the mode latent model. Hence ll should be", "(N, S, obs_dim) tensor action_seq : (N, S, action_dim) tensor skill_seq : (N,", "obs_seq=obs_seq ) action_recon = mode_latent_model.reconstruct_action( features_seq=features_seq, mode_sample=posterior['samples'] ) ll = action_recon['dists'].log_prob(action_seq).mean(dim=0).sum() mse =", "= 0 seq_dim = -2 data_dim = -1 batch_size = obs_seq.size(batch_dim) seq_len =", "= True posterior, features_seq = mode_latent_model.sample_mode_posterior_with_features( obs_seq=obs_seq ) action_recon = mode_latent_model.reconstruct_action( features_seq=features_seq, mode_sample=posterior['samples']", "reward. \"\"\" batch_dim = 0 seq_dim = -2 data_dim = -1 batch_size =", "torch.Tensor, )->torch.Tensor: \"\"\" Args: mode_latent_model : latent variable model obs_seq : (N, S,", "mse = F.mse_loss(action_recon['samples'], action_seq) ll.backward() gradients_per_transition = obs_seq.grad.sum(dim=data_dim, keepdim=True) assert gradients_per_transition.shape == torch.Size((batch_size,", "skill_seq: torch.Tensor, )->torch.Tensor: \"\"\" Args: mode_latent_model : latent variable model obs_seq : (N,", "Overall goal for the SAC-Agent is a high reconstruction error for the mode", "Hence ll should be low. Changing transitions, that have a high gradient, should", "ll should be low. Changing transitions, that have a high gradient, should have", "low. 
Changing transitions, that have a high gradient, should have high influence on", "reconstruction_based_rewards( mode_latent_model: ModeLatentNetworkWithEncoder, obs_seq: torch.Tensor, action_seq: torch.Tensor, skill_seq: torch.Tensor, )->torch.Tensor: \"\"\" Args: mode_latent_model", "\"\"\" batch_dim = 0 seq_dim = -2 data_dim = -1 batch_size = obs_seq.size(batch_dim)", "high reconstruction error for the mode latent model. Hence ll should be low.", "the mode latent model. Hence ll should be low. Changing transitions, that have", "should have high influence on the reconstruction error. Transition that have a high" ]
[]
[ "= [ ('addresses', '0001_initial'), ('orders', '0002_auto_20200204_1253'), ] operations = [ migrations.AddField( model_name='order', name='billing_address',", "12:55 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [", "] operations = [ migrations.AddField( model_name='order', name='billing_address', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='addresses.Address'), ), migrations.AddField(", "model_name='order', name='billing_address', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='addresses.Address'), ), migrations.AddField( model_name='order', name='shipping_address', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,", "name='billing_address', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='addresses.Address'), ), migrations.AddField( model_name='order', name='shipping_address', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='shipping_address',", "Migration(migrations.Migration): dependencies = [ ('addresses', '0001_initial'), ('orders', '0002_auto_20200204_1253'), ] operations = [ migrations.AddField(", "field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='addresses.Address'), ), migrations.AddField( model_name='order', name='shipping_address', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='shipping_address', to='addresses.Address'),", "[ ('addresses', '0001_initial'), ('orders', '0002_auto_20200204_1253'), ] operations = [ migrations.AddField( model_name='order', name='billing_address', field=models.ForeignKey(blank=True,", "[ migrations.AddField( model_name='order', name='billing_address', field=models.ForeignKey(blank=True, null=True, 
on_delete=django.db.models.deletion.CASCADE, to='addresses.Address'), ), migrations.AddField( model_name='order', name='shipping_address', field=models.ForeignKey(blank=True,", "on 2020-02-07 12:55 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies", "2020-02-07 12:55 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies =", "('addresses', '0001_initial'), ('orders', '0002_auto_20200204_1253'), ] operations = [ migrations.AddField( model_name='order', name='billing_address', field=models.ForeignKey(blank=True, null=True,", "dependencies = [ ('addresses', '0001_initial'), ('orders', '0002_auto_20200204_1253'), ] operations = [ migrations.AddField( model_name='order',", "Generated by Django 2.0 on 2020-02-07 12:55 from django.db import migrations, models import", "null=True, on_delete=django.db.models.deletion.CASCADE, to='addresses.Address'), ), migrations.AddField( model_name='order', name='shipping_address', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='shipping_address', to='addresses.Address'), ),", "on_delete=django.db.models.deletion.CASCADE, to='addresses.Address'), ), migrations.AddField( model_name='order', name='shipping_address', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='shipping_address', to='addresses.Address'), ), ]", "= [ migrations.AddField( model_name='order', name='billing_address', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='addresses.Address'), ), migrations.AddField( model_name='order', name='shipping_address',", "models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('addresses', '0001_initial'), ('orders', '0002_auto_20200204_1253'), ]", "import django.db.models.deletion class 
Migration(migrations.Migration): dependencies = [ ('addresses', '0001_initial'), ('orders', '0002_auto_20200204_1253'), ] operations", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('addresses', '0001_initial'), ('orders', '0002_auto_20200204_1253'),", "2.0 on 2020-02-07 12:55 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('addresses', '0001_initial'), ('orders', '0002_auto_20200204_1253'), ] operations =", "operations = [ migrations.AddField( model_name='order', name='billing_address', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='addresses.Address'), ), migrations.AddField( model_name='order',", "'0002_auto_20200204_1253'), ] operations = [ migrations.AddField( model_name='order', name='billing_address', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='addresses.Address'), ),", "class Migration(migrations.Migration): dependencies = [ ('addresses', '0001_initial'), ('orders', '0002_auto_20200204_1253'), ] operations = [", "Django 2.0 on 2020-02-07 12:55 from django.db import migrations, models import django.db.models.deletion class", "'0001_initial'), ('orders', '0002_auto_20200204_1253'), ] operations = [ migrations.AddField( model_name='order', name='billing_address', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('addresses', '0001_initial'),", "# Generated by Django 2.0 on 2020-02-07 12:55 from django.db import migrations, models", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('addresses', '0001_initial'), 
('orders',", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('addresses',", "('orders', '0002_auto_20200204_1253'), ] operations = [ migrations.AddField( model_name='order', name='billing_address', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='addresses.Address'),", "migrations.AddField( model_name='order', name='billing_address', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='addresses.Address'), ), migrations.AddField( model_name='order', name='shipping_address', field=models.ForeignKey(blank=True, null=True,", "by Django 2.0 on 2020-02-07 12:55 from django.db import migrations, models import django.db.models.deletion" ]
[ "Image.open(s) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_eq_png(self): \"\"\" Checking that we're actually getting", "if tag not in ignored: if tag.endswith('Resolution'): val = original[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1],", "= Image.open(f) original = img.tag.named() reloaded = loaded.tag.named() # PhotometricInterpretation is set from", "f = self.tempfile('temp.tiff') img.save(f, tiffinfo=img.tag) loaded = Image.open(f) original = img.tag.named() reloaded =", "the same interpretation of the image as Imagemagick is? \"\"\" TiffImagePlugin.READ_LIBTIFF = True", "in a different form. out = self.tempfile(\"temp.png\") im.save(out) class TestFileLibTiff(LibTiffTestCase): def test_g4_tiff(self): \"\"\"Test", "path load path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" im = Image.open(file) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im)", "closed. self.assertRaises(OSError, lambda: os.fstat(fn)) self.assertRaises(OSError, lambda: os.close(fn)) def test_multipage(self): # issue #862 TiffImagePlugin.READ_LIBTIFF", "file = \"Tests/images/hopper_g4_500.tif\" orig = Image.open(file) out = self.tempfile(\"temp.tif\") rot = orig.transpose(Image.ROTATE_90) self.assertEqual(rot.size,", "self.assertEqual(im.size, (278, 374)) self.assertEqual( im.tile[0][:3], ('tiff_adobe_deflate', (0, 0, 278, 374), 0)) im.load() def", "roundtrip\" % tag) def test_g3_compression(self): i = Image.open('Tests/images/hopper_g4_500.tif') out = self.tempfile(\"temp.tif\") i.save(out, compression='group3')", "out = self.tempfile(\"temp.tif\") orig.tag[269] = 'temp.tif' orig.save(out) reread = Image.open(out) self.assertEqual('temp.tif', reread.tag[269]) def", "TiffImagePlugin.READ_LIBTIFF = False def test__next(self): TiffImagePlugin.READ_LIBTIFF = True im = Image.open('Tests/images/hopper.tif') self.assertFalse(im.tag.next) im.load()", "im.load() def test_write_metadata(self): \"\"\" Test metadata writing through 
libtiff \"\"\" img = Image.open('Tests/images/hopper_g4.tif')", "ord(b'\\x01')) else: self.assertEqual(b[0], b'\\xe0') self.assertEqual(b[1], b'\\x01') out = self.tempfile(\"temp.tif\") # out = \"temp.le.tif\"", "\"%s didn't roundtrip\" % tag) def test_g3_compression(self): i = Image.open('Tests/images/hopper_g4_500.tif') out = self.tempfile(\"temp.tif\")", "scale so that a 12bit FFF is 16bit FFF0, # so we need", "FFF0, # so we need to unshift so that the integer values are", "im.save(out, compression=compression) im2 = Image.open(out) self.assert_image_equal(im, im2) im.save(out, compression='jpeg') im2 = Image.open(out) self.assert_image_similar(im,", "save_bytesio(\"packbits\") save_bytesio(\"tiff_lzw\") TiffImagePlugin.WRITE_LIBTIFF = False TiffImagePlugin.READ_LIBTIFF = False if __name__ == '__main__': unittest.main()", "compression='tiff_adobe_deflate') im2 = Image.open(out) im2.load() self.assert_image_equal(im, im2) def test_compressions(self): im = hopper('RGB') out", "(little endian) if py3: self.assertEqual(b[0], ord(b'\\xe0')) self.assertEqual(b[1], ord(b'\\x01')) else: self.assertEqual(b[0], b'\\xe0') self.assertEqual(b[1], b'\\x01')", "self.assertNotEqual(orig.tobytes(), reread.tobytes()) def test_adobe_deflate_tiff(self): file = \"Tests/images/tiff_adobe_deflate.tif\" im = Image.open(file) self.assertEqual(im.mode, \"RGB\") self.assertEqual(im.size,", "def test_blur(self): # test case from irc, how to do blur on b/w", "def test_little_endian(self): im = Image.open('Tests/images/16bit.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode, 'I;16') b = im.tobytes()", "not the original image. 
ignored = [ 'StripByteCounts', 'RowsPerStrip', 'PageNumber', 'PhotometricInterpretation'] for tag,", "the data actually load im.load() im.getdata() try: self.assertEqual(im._compression, 'group4') except: print(\"No _compression\") print", "% tag) else: self.assertEqual( original[tag], value, \"%s didn't roundtrip\" % tag) for tag,", "hopper('CMYK') out = self.tempfile('temp.tif') im.save(out, compression='tiff_adobe_deflate') im2 = Image.open(out) self.assert_image_equal(im, im2) def xtest_bw_compression_w_rgb(self):", "= self.tempfile(\"temp.tif\") im.save(out) reread = Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480) def test_g4_string_info(self):", "RightShift 4 12in16bit2.tif # imagemagick will auto scale so that a 12bit FFF", "True im = Image.open('Tests/images/hopper.tif') self.assertFalse(im.tag.next) im.load() self.assertFalse(im.tag.next) def test_4bit(self): # Arrange test_file =", "i = Image.open('Tests/images/hopper_g4_500.tif') out = self.tempfile(\"temp.tif\") i.save(out, compression='group3') reread = Image.open(out) self.assertEqual(reread.info['compression'], 'group3')", "self.tempfile('temp.tif') im.save(out, compression='tiff_adobe_deflate') im2 = Image.open(out) self.assert_image_equal(im, im2) def xtest_bw_compression_w_rgb(self): \"\"\" This test", "import os import io from PIL import Image, TiffImagePlugin class LibTiffTestCase(PillowTestCase): def setUp(self):", "as f: im = Image.open(f) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_tiff_bytesio(self): \"\"\"Testing the", "issue #862 TiffImagePlugin.READ_LIBTIFF = True im = Image.open('Tests/images/multipage.tiff') # file is a multipage", "self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480) def test_g4_string_info(self): \"\"\"Tests String data in info directory\"\"\"", "0)), 480) # UNDONE - libtiff defaults to writing 
in native endian, so", "hopper, py3 import os import io from PIL import Image, TiffImagePlugin class LibTiffTestCase(PillowTestCase):", "a 12bit FFF is 16bit FFF0, # so we need to unshift so", "self.assertFalse(im.tag.next) im.load() self.assertFalse(im.tag.next) def test_4bit(self): # Arrange test_file = \"Tests/images/hopper_gray_4bpp.tif\" original = hopper(\"L\")", "saving to io.BytesIO() object. TiffImagePlugin.WRITE_LIBTIFF = True TiffImagePlugin.READ_LIBTIFF = True # Generate test", "= self.tempfile(\"temp.tif\") i.save(out, compression='group3') reread = Image.open(out) self.assertEqual(reread.info['compression'], 'group3') self.assert_image_equal(reread, i) def test_little_endian(self):", "out = self.tempfile('temp.tif') im.save(out, compression='tiff_adobe_deflate') im2 = Image.open(out) self.assert_image_equal(im, im2) def xtest_bw_compression_w_rgb(self): \"\"\"", "# to make the target -- # convert 12bit.cropped.tif -depth 16 tmp.tif #", "png) def test_g4_write(self): \"\"\"Checking to see that the saved image is the same", "self.assertTrue(im.tag.next) im.seek(1) self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (255, 0, 0)) self.assertTrue(im.tag.next) im.seek(2) self.assertFalse(im.tag.next)", "im.load() im.getdata() try: self.assertEqual(im._compression, 'group4') except: print(\"No _compression\") print (dir(im)) # can we", "that we're actually getting the data that we expect\"\"\" png = Image.open('Tests/images/hopper_bw_500.png') g4", "tag.endswith('Resolution'): val = reloaded[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't roundtrip\" % tag) else:", "\"libtiff_encoder\" not in codecs or \"libtiff_decoder\" not in codecs: self.skipTest(\"tiff support not available\")", "Are we generating the same interpretation of the image as Imagemagick is? 
\"\"\"", "TiffImagePlugin.READ_LIBTIFF = False # to make the target -- # convert 12bit.cropped.tif -depth", "out = self.tempfile('temp.tif') for compression in ('packbits', 'tiff_lzw'): im.save(out, compression=compression) im2 = Image.open(out)", "green, 10x10 red, 20x20 blue im.seek(0) self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 128,", "from SAVE_INFO, # not the original image. ignored = [ 'StripByteCounts', 'RowsPerStrip', 'PageNumber',", "# PR 1011 # Test TIFF saving to io.BytesIO() object. TiffImagePlugin.WRITE_LIBTIFF = True", "auto scale so that a 12bit FFF is 16bit FFF0, # so we", "self.assertRaises(IOError, lambda: im.save(out, compression='group4')) def test_fp_leak(self): im = Image.open(\"Tests/images/hopper_g4_500.tif\") fn = im.fp.fileno() os.fstat(fn)", "irc, how to do blur on b/w image # and save to compressed", "else: self.assertEqual(b[0], b'\\x01') self.assertEqual(b[1], b'\\xe0') out = self.tempfile(\"temp.tif\") im.save(out) reread = Image.open(out) self.assertEqual(reread.info['compression'],", "the string load path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" with open(file, 'rb') as f: im", "self.tempfile(\"temp.tif\") im.save(out) reread = Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480) def test_g4_string_info(self): \"\"\"Tests", "val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't roundtrip\" % tag) else: self.assertEqual( original[tag], value, \"%s didn't", "to unshift so that the integer values are the same. 
im2 = Image.open('Tests/images/12in16bit.tif')", "lambda: im.save(out, compression='group4')) def test_fp_leak(self): im = Image.open(\"Tests/images/hopper_g4_500.tif\") fn = im.fp.fileno() os.fstat(fn) im.load()", "are in image native order (little endian) if py3: self.assertEqual(b[0], ord(b'\\xe0')) self.assertEqual(b[1], ord(b'\\x01'))", "out = self.tempfile(\"temp.tif\") im.save(out) reread = Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480) def", "not in ignored: if tag.endswith('Resolution'): val = original[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't", "(278, 374)) self.assertEqual( im.tile[0][:3], ('tiff_adobe_deflate', (0, 0, 278, 374), 0)) im.load() def test_write_metadata(self):", "def test_g4_tiff(self): \"\"\"Test the ordinary file path load path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" im", "value in reloaded.items(): if tag not in ignored: if tag.endswith('Resolution'): val = original[tag]", "xtest_bw_compression_w_rgb(self): \"\"\" This test passes, but when running all tests causes a failure", "= hopper() def save_bytesio(compression=None): buffer_io = io.BytesIO() pilim.save(buffer_io, format=\"tiff\", compression=compression) buffer_io.seek(0) pilim_load =", "in codecs or \"libtiff_decoder\" not in codecs: self.skipTest(\"tiff support not available\") def _assert_noerr(self,", "Image.open(out) self.assertEqual(reread.size, (500, 500)) self._assert_noerr(reread) self.assert_image_equal(reread, rot) self.assertEqual(reread.info['compression'], 'group4') self.assertEqual(reread.info['compression'], orig.info['compression']) self.assertNotEqual(orig.tobytes(), reread.tobytes())", "im.save(out) reread = Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480) # UNDONE - libtiff", "endian, so # on big endian, we'll get back mode = 'I;16B' 
here.", "will auto scale so that a 12bit FFF is 16bit FFF0, # so", "def test_cmyk_save(self): im = hopper('CMYK') out = self.tempfile('temp.tif') im.save(out, compression='tiff_adobe_deflate') im2 = Image.open(out)", "what we wrote\"\"\" file = \"Tests/images/hopper_g4_500.tif\" orig = Image.open(file) out = self.tempfile(\"temp.tif\") rot", "= self.tempfile('temp.tif') im = Image.open('Tests/images/pport_g4.tif') im = im.convert('L') im = im.filter(ImageFilter.GaussianBlur(4)) im.save(out, compression='tiff_adobe_deflate')", "is the same as what we wrote\"\"\" file = \"Tests/images/hopper_g4_500.tif\" orig = Image.open(file)", "% tag) for tag, value in original.items(): if tag not in ignored: if", "(500, 500)) self._assert_noerr(im) def test_g4_tiff_bytesio(self): \"\"\"Testing the stringio loading code path\"\"\" file =", "is? \"\"\" TiffImagePlugin.READ_LIBTIFF = True # Image.DEBUG = True im = Image.open('Tests/images/12bit.cropped.tif') im.load()", "self.assertEqual(reread.getpixel((0, 0)), 480) # UNDONE - libtiff defaults to writing in native endian,", "rot) self.assertEqual(reread.info['compression'], 'group4') self.assertEqual(reread.info['compression'], orig.info['compression']) self.assertNotEqual(orig.tobytes(), reread.tobytes()) def test_adobe_deflate_tiff(self): file = \"Tests/images/tiff_adobe_deflate.tif\" im", "im2) def test_blur(self): # test case from irc, how to do blur on", "print (im2.getpixel((0, 0))) print (im2.getpixel((0, 1))) print (im2.getpixel((0, 2))) self.assert_image_equal(im, im2) def test_blur(self):", "msg=\"%s didn't roundtrip\" % tag) else: self.assertEqual( value, reloaded[tag], \"%s didn't roundtrip\" %", "im = Image.open('Tests/images/16bit.MM.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode, 'I;16B') b = im.tobytes() # Bytes", "\"libtiff_decoder\" not in codecs: self.skipTest(\"tiff support not available\") def _assert_noerr(self, im): \"\"\"Helper tests", "is 16bit FFF0, # so we need to 
unshift so that the integer", "= \"temp.le.tif\" im.save(out) reread = Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480) # UNDONE", "in native endian, so # on big endian, we'll get back mode =", "format=\"tiff\", compression=compression) buffer_io.seek(0) pilim_load = Image.open(buffer_io) self.assert_image_similar(pilim, pilim_load, 0) # save_bytesio() save_bytesio('raw') save_bytesio(\"packbits\")", "os.close(fn)) def test_multipage(self): # issue #862 TiffImagePlugin.READ_LIBTIFF = True im = Image.open('Tests/images/multipage.tiff') #", "make the target -- # convert 12bit.cropped.tif -depth 16 tmp.tif # convert tmp.tif", "= reloaded[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't roundtrip\" % tag) else: self.assertEqual( value,", "self.assert_image_equal(reread, i) def test_little_endian(self): im = Image.open('Tests/images/16bit.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode, 'I;16') b", "self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't roundtrip\" % tag) else: self.assertEqual( value, reloaded[tag], \"%s", "This test passes, but when running all tests causes a failure due to", "SAVE_INFO, # not the original image. ignored = [ 'StripByteCounts', 'RowsPerStrip', 'PageNumber', 'PhotometricInterpretation']", "im.load() TiffImagePlugin.READ_LIBTIFF = False # to make the target -- # convert 12bit.cropped.tif", "by libtiff. 
We need to capture that but not now\"\"\" im = hopper('RGB')", "\"Tests/images/pport_g4.tif\" im = Image.open(file) self._assert_noerr(im) def test_g4_tiff_file(self): \"\"\"Testing the string load path\"\"\" file", "data that we expect\"\"\" png = Image.open('Tests/images/g4-fillorder-test.png') g4 = Image.open('Tests/images/g4-fillorder-test.tif') self.assert_image_equal(g4, png) def", "Image.open(file) self._assert_noerr(im) def test_g4_tiff_file(self): \"\"\"Testing the string load path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" with", "Image.open('Tests/images/hopper_bw_500.png') g4 = Image.open('Tests/images/hopper_g4_500.tif') self.assert_image_equal(g4, png) # see https://github.com/python-pillow/Pillow/issues/279 def test_g4_fillorder_eq_png(self): \"\"\" Checking", "if py3: self.assertEqual(b[0], ord(b'\\x01')) self.assertEqual(b[1], ord(b'\\xe0')) else: self.assertEqual(b[0], b'\\x01') self.assertEqual(b[1], b'\\xe0') out =", "self.tempfile(\"temp.tif\") orig.tag[269] = 'temp.tif' orig.save(out) reread = Image.open(out) self.assertEqual('temp.tif', reread.tag[269]) def test_12bit_rawmode(self): \"\"\"", "due to output on stderr from the error thrown by libtiff. 
We need", "20)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 0, 255)) TiffImagePlugin.READ_LIBTIFF = False def test__next(self): TiffImagePlugin.READ_LIBTIFF =", "\"Tests/images/hopper_g4_500.tif\" im = Image.open(file) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_large(self): file = \"Tests/images/pport_g4.tif\"", "= Image.open('Tests/images/pport_g4.tif') im = im.convert('L') im = im.filter(ImageFilter.GaussianBlur(4)) im.save(out, compression='tiff_adobe_deflate') im2 = Image.open(out)", "image native order (little endian) if py3: self.assertEqual(b[0], ord(b'\\xe0')) self.assertEqual(b[1], ord(b'\\x01')) else: self.assertEqual(b[0],", "self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't roundtrip\" % tag) else: self.assertEqual( original[tag], value, \"%s", "io.BytesIO() with open(file, 'rb') as f: s.write(f.read()) s.seek(0) im = Image.open(s) self.assertEqual(im.size, (500,", "endian) if py3: self.assertEqual(b[0], ord(b'\\xe0')) self.assertEqual(b[1], ord(b'\\x01')) else: self.assertEqual(b[0], b'\\xe0') self.assertEqual(b[1], b'\\x01') out", "out = self.tempfile(\"temp.tif\") rot = orig.transpose(Image.ROTATE_90) self.assertEqual(rot.size, (500, 500)) rot.save(out) reread = Image.open(out)", "test_multipage(self): # issue #862 TiffImagePlugin.READ_LIBTIFF = True im = Image.open('Tests/images/multipage.tiff') # file is", "case from irc, how to do blur on b/w image # and save", "2))) self.assert_image_equal(im, im2) def test_blur(self): # test case from irc, how to do", "-evaluate RightShift 4 12in16bit2.tif # imagemagick will auto scale so that a 12bit", "causes a failure due to output on stderr from the error thrown by", "as f: s.write(f.read()) s.seek(0) im = Image.open(s) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_eq_png(self):", "# file is a multipage tiff, 10x10 green, 10x10 red, 20x20 blue im.seek(0)", "lambda: im.save(out, 
compression='tiff_ccitt')) self.assertRaises(IOError, lambda: im.save(out, compression='group3')) self.assertRaises(IOError, lambda: im.save(out, compression='group4')) def test_fp_leak(self):", "def test_g4_eq_png(self): \"\"\" Checking that we're actually getting the data that we expect\"\"\"", "big endian, we'll get back mode = 'I;16B' here. def test_big_endian(self): im =", "tmp.tif # convert tmp.tif -evaluate RightShift 4 12in16bit2.tif # imagemagick will auto scale", "12bit FFF is 16bit FFF0, # so we need to unshift so that", "= self.tempfile('temp.tif') self.assertRaises( IOError, lambda: im.save(out, compression='tiff_ccitt')) self.assertRaises(IOError, lambda: im.save(out, compression='group3')) self.assertRaises(IOError, lambda:", "the ordinary file path load path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" im = Image.open(file) self.assertEqual(im.size,", "test_little_endian(self): im = Image.open('Tests/images/16bit.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode, 'I;16') b = im.tobytes() #", "= Image.open(file) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_large(self): file = \"Tests/images/pport_g4.tif\" im =", "= hopper(\"L\") # Act TiffImagePlugin.READ_LIBTIFF = True im = Image.open(test_file) TiffImagePlugin.READ_LIBTIFF = False", "self.assertEqual(reread.getpixel((0, 0)), 480) def test_g4_string_info(self): \"\"\"Tests String data in info directory\"\"\" file =", "2))) print (im2.getpixel((0, 0))) print (im2.getpixel((0, 1))) print (im2.getpixel((0, 2))) self.assert_image_equal(im, im2) def", "'group4') except: print(\"No _compression\") print (dir(im)) # can we write it back out,", "'PhotometricInterpretation'] for tag, value in reloaded.items(): if tag not in ignored: if tag.endswith('Resolution'):", "= Image.open(out) self.assert_image_equal(im, im2) def xtest_bw_compression_w_rgb(self): \"\"\" This test passes, but when running", "need to capture that but not now\"\"\" im = 
hopper('RGB') out = self.tempfile('temp.tif')", "in codecs: self.skipTest(\"tiff support not available\") def _assert_noerr(self, im): \"\"\"Helper tests that assert", "= self.tempfile('temp.tiff') img.save(f, tiffinfo=img.tag) loaded = Image.open(f) original = img.tag.named() reloaded = loaded.tag.named()", "= Image.open('Tests/images/12in16bit.tif') if Image.DEBUG: print (im.getpixel((0, 0))) print (im.getpixel((0, 1))) print (im.getpixel((0, 2)))", "load path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" with open(file, 'rb') as f: im = Image.open(f)", "# Bytes are in image native order (big endian) if py3: self.assertEqual(b[0], ord(b'\\x01'))", "img = Image.open('Tests/images/hopper_g4.tif') f = self.tempfile('temp.tiff') img.save(f, tiffinfo=img.tag) loaded = Image.open(f) original =", "ord(b'\\xe0')) else: self.assertEqual(b[0], b'\\x01') self.assertEqual(b[1], b'\\xe0') out = self.tempfile(\"temp.tif\") im.save(out) reread = Image.open(out)", "= self.tempfile(\"temp.tif\") orig.tag[269] = 'temp.tif' orig.save(out) reread = Image.open(out) self.assertEqual('temp.tif', reread.tag[269]) def test_12bit_rawmode(self):", "reloaded = loaded.tag.named() # PhotometricInterpretation is set from SAVE_INFO, # not the original", "self.assertEqual(reread.size, (500, 500)) self._assert_noerr(reread) self.assert_image_equal(reread, rot) self.assertEqual(reread.info['compression'], 'group4') self.assertEqual(reread.info['compression'], orig.info['compression']) self.assertNotEqual(orig.tobytes(), reread.tobytes()) def", "lambda: im.save(out, compression='group3')) self.assertRaises(IOError, lambda: im.save(out, compression='group4')) def test_fp_leak(self): im = Image.open(\"Tests/images/hopper_g4_500.tif\") fn", "= Image.open(f) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_tiff_bytesio(self): \"\"\"Testing the stringio loading code", "generating the same interpretation of the image as Imagemagick is? 
\"\"\" TiffImagePlugin.READ_LIBTIFF =", "https://github.com/python-pillow/Pillow/issues/279 def test_g4_fillorder_eq_png(self): \"\"\" Checking that we're actually getting the data that we", "in ignored: if tag.endswith('Resolution'): val = reloaded[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't roundtrip\"", "\"\"\"Helper tests that assert basic sanity about the g4 tiff reading\"\"\" # 1", "test image pilim = hopper() def save_bytesio(compression=None): buffer_io = io.BytesIO() pilim.save(buffer_io, format=\"tiff\", compression=compression)", "back out, in a different form. out = self.tempfile(\"temp.png\") im.save(out) class TestFileLibTiff(LibTiffTestCase): def", "f: s.write(f.read()) s.seek(0) im = Image.open(s) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_eq_png(self): \"\"\"", "tiffinfo=img.tag) loaded = Image.open(f) original = img.tag.named() reloaded = loaded.tag.named() # PhotometricInterpretation is", "value[0][0]/value[0][1], msg=\"%s didn't roundtrip\" % tag) else: self.assertEqual( value, reloaded[tag], \"%s didn't roundtrip\"", "expect\"\"\" png = Image.open('Tests/images/hopper_bw_500.png') g4 = Image.open('Tests/images/hopper_g4_500.tif') self.assert_image_equal(g4, png) # see https://github.com/python-pillow/Pillow/issues/279 def", "we're actually getting the data that we expect\"\"\" png = Image.open('Tests/images/hopper_bw_500.png') g4 =", "self.assert_image_equal(im, im2) def test_blur(self): # test case from irc, how to do blur", "= hopper('CMYK') out = self.tempfile('temp.tif') im.save(out, compression='tiff_adobe_deflate') im2 = Image.open(out) self.assert_image_equal(im, im2) def", "Image, TiffImagePlugin class LibTiffTestCase(PillowTestCase): def setUp(self): codecs = dir(Image.core) if \"libtiff_encoder\" not in", "= Image.open(file) self.assertEqual(im.mode, \"RGB\") self.assertEqual(im.size, (278, 374)) self.assertEqual( im.tile[0][:3], ('tiff_adobe_deflate', (0, 
0, 278,", "tag, value in original.items(): if tag not in ignored: if tag.endswith('Resolution'): val =", "374)) self.assertEqual( im.tile[0][:3], ('tiff_adobe_deflate', (0, 0, 278, 374), 0)) im.load() def test_write_metadata(self): \"\"\"", "red, 20x20 blue im.seek(0) self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 128, 0)) self.assertTrue(im.tag.next)", "img.save(f, tiffinfo=img.tag) loaded = Image.open(f) original = img.tag.named() reloaded = loaded.tag.named() # PhotometricInterpretation", "'tiff_lzw'): im.save(out, compression=compression) im2 = Image.open(out) self.assert_image_equal(im, im2) im.save(out, compression='jpeg') im2 = Image.open(out)", "from PIL import Image, TiffImagePlugin class LibTiffTestCase(PillowTestCase): def setUp(self): codecs = dir(Image.core) if", "even more closed. self.assertRaises(OSError, lambda: os.fstat(fn)) self.assertRaises(OSError, lambda: os.close(fn)) def test_multipage(self): # issue", "Image.open('Tests/images/multipage.tiff') # file is a multipage tiff, 10x10 green, 10x10 red, 20x20 blue", "failure due to output on stderr from the error thrown by libtiff. We", "buffer_io.seek(0) pilim_load = Image.open(buffer_io) self.assert_image_similar(pilim, pilim_load, 0) # save_bytesio() save_bytesio('raw') save_bytesio(\"packbits\") save_bytesio(\"tiff_lzw\") TiffImagePlugin.WRITE_LIBTIFF", "test_file = \"Tests/images/hopper_gray_4bpp.tif\" original = hopper(\"L\") # Act TiffImagePlugin.READ_LIBTIFF = True im =", "self.assertEqual(rot.size, (500, 500)) rot.save(out) reread = Image.open(out) self.assertEqual(reread.size, (500, 500)) self._assert_noerr(reread) self.assert_image_equal(reread, rot)", "os import io from PIL import Image, TiffImagePlugin class LibTiffTestCase(PillowTestCase): def setUp(self): codecs", "didn't roundtrip\" % tag) for tag, value in original.items(): if tag not in", "the original image. 
ignored = [ 'StripByteCounts', 'RowsPerStrip', 'PageNumber', 'PhotometricInterpretation'] for tag, value", "def test_multipage(self): # issue #862 TiffImagePlugin.READ_LIBTIFF = True im = Image.open('Tests/images/multipage.tiff') # file", "hopper(\"L\") # Act TiffImagePlugin.READ_LIBTIFF = True im = Image.open(test_file) TiffImagePlugin.READ_LIBTIFF = False #", "-- # convert 12bit.cropped.tif -depth 16 tmp.tif # convert tmp.tif -evaluate RightShift 4", "self.assertEqual(b[1], ord(b'\\xe0')) else: self.assertEqual(b[0], b'\\x01') self.assertEqual(b[1], b'\\xe0') out = self.tempfile(\"temp.tif\") im.save(out) reread =", "500)) rot.save(out) reread = Image.open(out) self.assertEqual(reread.size, (500, 500)) self._assert_noerr(reread) self.assert_image_equal(reread, rot) self.assertEqual(reread.info['compression'], 'group4')", "test_g4_string_info(self): \"\"\"Tests String data in info directory\"\"\" file = \"Tests/images/hopper_g4_500.tif\" orig = Image.open(file)", "code path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" s = io.BytesIO() with open(file, 'rb') as f:", "def test_12bit_rawmode(self): \"\"\" Are we generating the same interpretation of the image as", "'temp.tif' orig.save(out) reread = Image.open(out) self.assertEqual('temp.tif', reread.tag[269]) def test_12bit_rawmode(self): \"\"\" Are we generating", "7.3) def test_save_bytesio(self): # PR 1011 # Test TIFF saving to io.BytesIO() object.", "TIFF saving to io.BytesIO() object. TiffImagePlugin.WRITE_LIBTIFF = True TiffImagePlugin.READ_LIBTIFF = True # Generate", "True TiffImagePlugin.READ_LIBTIFF = True # Generate test image pilim = hopper() def save_bytesio(compression=None):", "b/w image # and save to compressed tif. 
from PIL import ImageFilter out", "# out = \"temp.le.tif\" im.save(out) reread = Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480)", "tests that assert basic sanity about the g4 tiff reading\"\"\" # 1 bit", "interpretation of the image as Imagemagick is? \"\"\" TiffImagePlugin.READ_LIBTIFF = True # Image.DEBUG", "or \"libtiff_decoder\" not in codecs: self.skipTest(\"tiff support not available\") def _assert_noerr(self, im): \"\"\"Helper", "0)) im.load() def test_write_metadata(self): \"\"\" Test metadata writing through libtiff \"\"\" img =", "image as Imagemagick is? \"\"\" TiffImagePlugin.READ_LIBTIFF = True # Image.DEBUG = True im", "= Image.open('Tests/images/16bit.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode, 'I;16') b = im.tobytes() # Bytes are", "Image.open('Tests/images/g4-fillorder-test.tif') self.assert_image_equal(g4, png) def test_g4_write(self): \"\"\"Checking to see that the saved image is", "can we write it back out, in a different form. out = self.tempfile(\"temp.png\")", "self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_large(self): file = \"Tests/images/pport_g4.tif\" im = Image.open(file) self._assert_noerr(im)", "self.tempfile(\"temp.tif\") rot = orig.transpose(Image.ROTATE_90) self.assertEqual(rot.size, (500, 500)) rot.save(out) reread = Image.open(out) self.assertEqual(reread.size, (500,", "\"Tests/images/hopper_g4_500.tif\" orig = Image.open(file) out = self.tempfile(\"temp.tif\") orig.tag[269] = 'temp.tif' orig.save(out) reread =", "this should force even more closed. self.assertRaises(OSError, lambda: os.fstat(fn)) self.assertRaises(OSError, lambda: os.close(fn)) def", "endian, we'll get back mode = 'I;16B' here. 
def test_big_endian(self): im = Image.open('Tests/images/16bit.MM.deflate.tif')", "print (im2.getpixel((0, 2))) self.assert_image_equal(im, im2) def test_blur(self): # test case from irc, how", "hopper('RGB') out = self.tempfile('temp.tif') for compression in ('packbits', 'tiff_lzw'): im.save(out, compression=compression) im2 =", "= 'temp.tif' orig.save(out) reread = Image.open(out) self.assertEqual('temp.tif', reread.tag[269]) def test_12bit_rawmode(self): \"\"\" Are we", "to see that the saved image is the same as what we wrote\"\"\"", "object. TiffImagePlugin.WRITE_LIBTIFF = True TiffImagePlugin.READ_LIBTIFF = True # Generate test image pilim =", "im2) def xtest_bw_compression_w_rgb(self): \"\"\" This test passes, but when running all tests causes", "if tag.endswith('Resolution'): val = reloaded[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't roundtrip\" % tag)", "tag) for tag, value in original.items(): if tag not in ignored: if tag.endswith('Resolution'):", "self.assertEqual(im.mode, 'I;16B') b = im.tobytes() # Bytes are in image native order (big", "String data in info directory\"\"\" file = \"Tests/images/hopper_g4_500.tif\" orig = Image.open(file) out =", "actually load im.load() im.getdata() try: self.assertEqual(im._compression, 'group4') except: print(\"No _compression\") print (dir(im)) #", "getting the data that we expect\"\"\" png = Image.open('Tests/images/g4-fillorder-test.png') g4 = Image.open('Tests/images/g4-fillorder-test.tif') self.assert_image_equal(g4,", "= Image.open(out) self.assertEqual('temp.tif', reread.tag[269]) def test_12bit_rawmode(self): \"\"\" Are we generating the same interpretation", "Image.open('Tests/images/16bit.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode, 'I;16') b = im.tobytes() # Bytes are in", "def xtest_bw_compression_w_rgb(self): \"\"\" This test passes, but when running all tests causes a", "'PageNumber', 'PhotometricInterpretation'] for 
tag, value in reloaded.items(): if tag not in ignored: if", "capture that but not now\"\"\" im = hopper('RGB') out = self.tempfile('temp.tif') self.assertRaises( IOError,", "tag not in ignored: if tag.endswith('Resolution'): val = original[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s", "128, 0)) self.assertTrue(im.tag.next) im.seek(1) self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (255, 0, 0)) self.assertTrue(im.tag.next)", "are the same. im2 = Image.open('Tests/images/12in16bit.tif') if Image.DEBUG: print (im.getpixel((0, 0))) print (im.getpixel((0,", "self.tempfile('temp.tiff') img.save(f, tiffinfo=img.tag) loaded = Image.open(f) original = img.tag.named() reloaded = loaded.tag.named() #", "b'\\xe0') self.assertEqual(b[1], b'\\x01') out = self.tempfile(\"temp.tif\") # out = \"temp.le.tif\" im.save(out) reread =", "this should close it. self.assertRaises(OSError, lambda: os.fstat(fn)) im = None # this should", "im2.load() self.assert_image_equal(im, im2) def test_compressions(self): im = hopper('RGB') out = self.tempfile('temp.tif') for compression", "UNDONE - libtiff defaults to writing in native endian, so # on big", "not in ignored: if tag.endswith('Resolution'): val = reloaded[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't", "not now\"\"\" im = hopper('RGB') out = self.tempfile('temp.tif') self.assertRaises( IOError, lambda: im.save(out, compression='tiff_ccitt'))", "we expect\"\"\" png = Image.open('Tests/images/hopper_bw_500.png') g4 = Image.open('Tests/images/hopper_g4_500.tif') self.assert_image_equal(g4, png) # see https://github.com/python-pillow/Pillow/issues/279", "Bytes are in image native order (big endian) if py3: self.assertEqual(b[0], ord(b'\\x01')) self.assertEqual(b[1],", "to make the target -- # convert 12bit.cropped.tif -depth 16 tmp.tif # convert", "= Image.open(out) im2.load() self.assert_image_equal(im, im2) def 
test_compressions(self): im = hopper('RGB') out = self.tempfile('temp.tif')", "in info directory\"\"\" file = \"Tests/images/hopper_g4_500.tif\" orig = Image.open(file) out = self.tempfile(\"temp.tif\") orig.tag[269]", "30) def test_cmyk_save(self): im = hopper('CMYK') out = self.tempfile('temp.tif') im.save(out, compression='tiff_adobe_deflate') im2 =", "\"temp.le.tif\" im.save(out) reread = Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480) # UNDONE -", "file = \"Tests/images/pport_g4.tif\" im = Image.open(file) self._assert_noerr(im) def test_g4_tiff_file(self): \"\"\"Testing the string load", "libtiff. We need to capture that but not now\"\"\" im = hopper('RGB') out", "True # Generate test image pilim = hopper() def save_bytesio(compression=None): buffer_io = io.BytesIO()", "self.assertRaises(OSError, lambda: os.close(fn)) def test_multipage(self): # issue #862 TiffImagePlugin.READ_LIBTIFF = True im =", "self._assert_noerr(reread) self.assert_image_equal(reread, rot) self.assertEqual(reread.info['compression'], 'group4') self.assertEqual(reread.info['compression'], orig.info['compression']) self.assertNotEqual(orig.tobytes(), reread.tobytes()) def test_adobe_deflate_tiff(self): file =", "we expect\"\"\" png = Image.open('Tests/images/g4-fillorder-test.png') g4 = Image.open('Tests/images/g4-fillorder-test.tif') self.assert_image_equal(g4, png) def test_g4_write(self): \"\"\"Checking", "codecs or \"libtiff_decoder\" not in codecs: self.skipTest(\"tiff support not available\") def _assert_noerr(self, im):", "string load path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" with open(file, 'rb') as f: im =", "image # and save to compressed tif. 
from PIL import ImageFilter out =", "= original[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't roundtrip\" % tag) else: self.assertEqual( original[tag],", "else: self.assertEqual( original[tag], value, \"%s didn't roundtrip\" % tag) for tag, value in", "im.getdata() try: self.assertEqual(im._compression, 'group4') except: print(\"No _compression\") print (dir(im)) # can we write", "Image.open('Tests/images/12bit.cropped.tif') im.load() TiffImagePlugin.READ_LIBTIFF = False # to make the target -- # convert", "self._assert_noerr(im) def test_g4_tiff_file(self): \"\"\"Testing the string load path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" with open(file,", "test case from irc, how to do blur on b/w image # and", "values are the same. im2 = Image.open('Tests/images/12in16bit.tif') if Image.DEBUG: print (im.getpixel((0, 0))) print", "open(file, 'rb') as f: im = Image.open(f) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_tiff_bytesio(self):", "if Image.DEBUG: print (im.getpixel((0, 0))) print (im.getpixel((0, 1))) print (im.getpixel((0, 2))) print (im2.getpixel((0,", "reloaded[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't roundtrip\" % tag) else: self.assertEqual( value, reloaded[tag],", "test_adobe_deflate_tiff(self): file = \"Tests/images/tiff_adobe_deflate.tif\" im = Image.open(file) self.assertEqual(im.mode, \"RGB\") self.assertEqual(im.size, (278, 374)) self.assertEqual(", "setUp(self): codecs = dir(Image.core) if \"libtiff_encoder\" not in codecs or \"libtiff_decoder\" not in", "tag) def test_g3_compression(self): i = Image.open('Tests/images/hopper_g4_500.tif') out = self.tempfile(\"temp.tif\") i.save(out, compression='group3') reread =", "dir(Image.core) if \"libtiff_encoder\" not in codecs or \"libtiff_decoder\" not in codecs: self.skipTest(\"tiff support", "the data that we expect\"\"\" png = 
Image.open('Tests/images/g4-fillorder-test.png') g4 = Image.open('Tests/images/g4-fillorder-test.tif') self.assert_image_equal(g4, png)", "that assert basic sanity about the g4 tiff reading\"\"\" # 1 bit self.assertEqual(im.mode,", "= 'I;16B' here. def test_big_endian(self): im = Image.open('Tests/images/16bit.MM.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode, 'I;16B')", "write it back out, in a different form. out = self.tempfile(\"temp.png\") im.save(out) class", "actually getting the data that we expect\"\"\" png = Image.open('Tests/images/g4-fillorder-test.png') g4 = Image.open('Tests/images/g4-fillorder-test.tif')", "self.tempfile('temp.tif') self.assertRaises( IOError, lambda: im.save(out, compression='tiff_ccitt')) self.assertRaises(IOError, lambda: im.save(out, compression='group3')) self.assertRaises(IOError, lambda: im.save(out,", "= \"Tests/images/tiff_adobe_deflate.tif\" im = Image.open(file) self.assertEqual(im.mode, \"RGB\") self.assertEqual(im.size, (278, 374)) self.assertEqual( im.tile[0][:3], ('tiff_adobe_deflate',", "self.assertRaises(IOError, lambda: im.save(out, compression='group3')) self.assertRaises(IOError, lambda: im.save(out, compression='group4')) def test_fp_leak(self): im = Image.open(\"Tests/images/hopper_g4_500.tif\")", "orig = Image.open(file) out = self.tempfile(\"temp.tif\") orig.tag[269] = 'temp.tif' orig.save(out) reread = Image.open(out)", "im = Image.open(test_file) TiffImagePlugin.READ_LIBTIFF = False # Assert self.assertEqual(im.size, (128, 128)) self.assertEqual(im.mode, \"L\")", "all tests causes a failure due to output on stderr from the error", "multipage tiff, 10x10 green, 10x10 red, 20x20 blue im.seek(0) self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0,", "same interpretation of the image as Imagemagick is? 
\"\"\" TiffImagePlugin.READ_LIBTIFF = True #", "= True TiffImagePlugin.READ_LIBTIFF = True # Generate test image pilim = hopper() def", "(0, 0, 278, 374), 0)) im.load() def test_write_metadata(self): \"\"\" Test metadata writing through", "# see https://github.com/python-pillow/Pillow/issues/279 def test_g4_fillorder_eq_png(self): \"\"\" Checking that we're actually getting the data", "orig.save(out) reread = Image.open(out) self.assertEqual('temp.tif', reread.tag[269]) def test_12bit_rawmode(self): \"\"\" Are we generating the", "\"\"\"Test the ordinary file path load path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" im = Image.open(file)", "def test_g3_compression(self): i = Image.open('Tests/images/hopper_g4_500.tif') out = self.tempfile(\"temp.tif\") i.save(out, compression='group3') reread = Image.open(out)", "self.assertFalse(im.tag.next) def test_4bit(self): # Arrange test_file = \"Tests/images/hopper_gray_4bpp.tif\" original = hopper(\"L\") # Act", "self.assertRaises( IOError, lambda: im.save(out, compression='tiff_ccitt')) self.assertRaises(IOError, lambda: im.save(out, compression='group3')) self.assertRaises(IOError, lambda: im.save(out, compression='group4'))", "save_bytesio('raw') save_bytesio(\"packbits\") save_bytesio(\"tiff_lzw\") TiffImagePlugin.WRITE_LIBTIFF = False TiffImagePlugin.READ_LIBTIFF = False if __name__ == '__main__':", "= self.tempfile(\"temp.tif\") rot = orig.transpose(Image.ROTATE_90) self.assertEqual(rot.size, (500, 500)) rot.save(out) reread = Image.open(out) self.assertEqual(reread.size,", "now\"\"\" im = hopper('RGB') out = self.tempfile('temp.tif') self.assertRaises( IOError, lambda: im.save(out, compression='tiff_ccitt')) self.assertRaises(IOError,", "0)) self.assertTrue(im.tag.next) im.seek(2) self.assertFalse(im.tag.next) self.assertEqual(im.size, (20, 20)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 0, 255)) TiffImagePlugin.READ_LIBTIFF", "= im.convert('L') im = im.filter(ImageFilter.GaussianBlur(4)) 
im.save(out, compression='tiff_adobe_deflate') im2 = Image.open(out) im2.load() self.assert_image_equal(im, im2)", "LibTiffTestCase(PillowTestCase): def setUp(self): codecs = dir(Image.core) if \"libtiff_encoder\" not in codecs or \"libtiff_decoder\"", "compression=compression) buffer_io.seek(0) pilim_load = Image.open(buffer_io) self.assert_image_similar(pilim, pilim_load, 0) # save_bytesio() save_bytesio('raw') save_bytesio(\"packbits\") save_bytesio(\"tiff_lzw\")", "= Image.open('Tests/images/hopper_g4_500.tif') out = self.tempfile(\"temp.tif\") i.save(out, compression='group3') reread = Image.open(out) self.assertEqual(reread.info['compression'], 'group3') self.assert_image_equal(reread,", "print (im.getpixel((0, 1))) print (im.getpixel((0, 2))) print (im2.getpixel((0, 0))) print (im2.getpixel((0, 1))) print", "img.tag.named() reloaded = loaded.tag.named() # PhotometricInterpretation is set from SAVE_INFO, # not the", "compression='tiff_ccitt')) self.assertRaises(IOError, lambda: im.save(out, compression='group3')) self.assertRaises(IOError, lambda: im.save(out, compression='group4')) def test_fp_leak(self): im =", "True im = Image.open(test_file) TiffImagePlugin.READ_LIBTIFF = False # Assert self.assertEqual(im.size, (128, 128)) self.assertEqual(im.mode,", "that we're actually getting the data that we expect\"\"\" png = Image.open('Tests/images/g4-fillorder-test.png') g4", "def _assert_noerr(self, im): \"\"\"Helper tests that assert basic sanity about the g4 tiff", "reading\"\"\" # 1 bit self.assertEqual(im.mode, \"1\") # Does the data actually load im.load()", "we generating the same interpretation of the image as Imagemagick is? 
\"\"\" TiffImagePlugin.READ_LIBTIFF", "(big endian) if py3: self.assertEqual(b[0], ord(b'\\x01')) self.assertEqual(b[1], ord(b'\\xe0')) else: self.assertEqual(b[0], b'\\x01') self.assertEqual(b[1], b'\\xe0')", "(500, 500)) self._assert_noerr(im) def test_g4_large(self): file = \"Tests/images/pport_g4.tif\" im = Image.open(file) self._assert_noerr(im) def", "png) # see https://github.com/python-pillow/Pillow/issues/279 def test_g4_fillorder_eq_png(self): \"\"\" Checking that we're actually getting the", "os.fstat(fn)) im = None # this should force even more closed. self.assertRaises(OSError, lambda:", "tests causes a failure due to output on stderr from the error thrown", "Assert self.assertEqual(im.size, (128, 128)) self.assertEqual(im.mode, \"L\") self.assert_image_similar(im, original, 7.3) def test_save_bytesio(self): # PR", "self.assertEqual(reread.info['compression'], 'group3') self.assert_image_equal(reread, i) def test_little_endian(self): im = Image.open('Tests/images/16bit.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode,", "test_12bit_rawmode(self): \"\"\" Are we generating the same interpretation of the image as Imagemagick", "integer values are the same. 
im2 = Image.open('Tests/images/12in16bit.tif') if Image.DEBUG: print (im.getpixel((0, 0)))", "= self.tempfile('temp.tif') im.save(out, compression='tiff_adobe_deflate') im2 = Image.open(out) self.assert_image_equal(im, im2) def xtest_bw_compression_w_rgb(self): \"\"\" This", "is a multipage tiff, 10x10 green, 10x10 red, 20x20 blue im.seek(0) self.assertEqual(im.size, (10,", "# Arrange test_file = \"Tests/images/hopper_gray_4bpp.tif\" original = hopper(\"L\") # Act TiffImagePlugin.READ_LIBTIFF = True", "TiffImagePlugin.WRITE_LIBTIFF = False TiffImagePlugin.READ_LIBTIFF = False if __name__ == '__main__': unittest.main() # End", "codecs = dir(Image.core) if \"libtiff_encoder\" not in codecs or \"libtiff_decoder\" not in codecs:", "# Assert self.assertEqual(im.size, (128, 128)) self.assertEqual(im.mode, \"L\") self.assert_image_similar(im, original, 7.3) def test_save_bytesio(self): #", "= Image.open('Tests/images/g4-fillorder-test.png') g4 = Image.open('Tests/images/g4-fillorder-test.tif') self.assert_image_equal(g4, png) def test_g4_write(self): \"\"\"Checking to see that", "= Image.open(out) self.assert_image_similar(im, im2, 30) def test_cmyk_save(self): im = hopper('CMYK') out = self.tempfile('temp.tif')", "so # on big endian, we'll get back mode = 'I;16B' here. 
def", "IOError, lambda: im.save(out, compression='tiff_ccitt')) self.assertRaises(IOError, lambda: im.save(out, compression='group3')) self.assertRaises(IOError, lambda: im.save(out, compression='group4')) def", "compression=compression) im2 = Image.open(out) self.assert_image_equal(im, im2) im.save(out, compression='jpeg') im2 = Image.open(out) self.assert_image_similar(im, im2,", "Bytes are in image native order (little endian) if py3: self.assertEqual(b[0], ord(b'\\xe0')) self.assertEqual(b[1],", "py3: self.assertEqual(b[0], ord(b'\\x01')) self.assertEqual(b[1], ord(b'\\xe0')) else: self.assertEqual(b[0], b'\\x01') self.assertEqual(b[1], b'\\xe0') out = self.tempfile(\"temp.tif\")", "tiff, 10x10 green, 10x10 red, 20x20 blue im.seek(0) self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)),", "different form. out = self.tempfile(\"temp.png\") im.save(out) class TestFileLibTiff(LibTiffTestCase): def test_g4_tiff(self): \"\"\"Test the ordinary", "in image native order (big endian) if py3: self.assertEqual(b[0], ord(b'\\x01')) self.assertEqual(b[1], ord(b'\\xe0')) else:", "with open(file, 'rb') as f: s.write(f.read()) s.seek(0) im = Image.open(s) self.assertEqual(im.size, (500, 500))", "save_bytesio(\"tiff_lzw\") TiffImagePlugin.WRITE_LIBTIFF = False TiffImagePlugin.READ_LIBTIFF = False if __name__ == '__main__': unittest.main() #", "def test_compressions(self): im = hopper('RGB') out = self.tempfile('temp.tif') for compression in ('packbits', 'tiff_lzw'):", "roundtrip\" % tag) else: self.assertEqual( value, reloaded[tag], \"%s didn't roundtrip\" % tag) def", "= Image.open(out) self.assert_image_equal(im, im2) im.save(out, compression='jpeg') im2 = Image.open(out) self.assert_image_similar(im, im2, 30) def", "self.assertEqual(im.size, (128, 128)) self.assertEqual(im.mode, \"L\") self.assert_image_similar(im, original, 7.3) def test_save_bytesio(self): # PR 1011", "stringio loading code path\"\"\" file = 
\"Tests/images/hopper_g4_500.tif\" s = io.BytesIO() with open(file, 'rb')", "Generate test image pilim = hopper() def save_bytesio(compression=None): buffer_io = io.BytesIO() pilim.save(buffer_io, format=\"tiff\",", "True im = Image.open('Tests/images/multipage.tiff') # file is a multipage tiff, 10x10 green, 10x10", "about the g4 tiff reading\"\"\" # 1 bit self.assertEqual(im.mode, \"1\") # Does the", "to compressed tif. from PIL import ImageFilter out = self.tempfile('temp.tif') im = Image.open('Tests/images/pport_g4.tif')", "loaded.tag.named() # PhotometricInterpretation is set from SAVE_INFO, # not the original image. ignored", "self.assertEqual(im.mode, \"L\") self.assert_image_similar(im, original, 7.3) def test_save_bytesio(self): # PR 1011 # Test TIFF", "Image.open(out) self.assert_image_equal(im, im2) im.save(out, compression='jpeg') im2 = Image.open(out) self.assert_image_similar(im, im2, 30) def test_cmyk_save(self):", "reloaded.items(): if tag not in ignored: if tag.endswith('Resolution'): val = original[tag] self.assert_almost_equal( val[0][0]/val[0][1],", "when running all tests causes a failure due to output on stderr from", "self.tempfile(\"temp.png\") im.save(out) class TestFileLibTiff(LibTiffTestCase): def test_g4_tiff(self): \"\"\"Test the ordinary file path load path\"\"\"", "on stderr from the error thrown by libtiff. 
We need to capture that", "self.assert_image_similar(pilim, pilim_load, 0) # save_bytesio() save_bytesio('raw') save_bytesio(\"packbits\") save_bytesio(\"tiff_lzw\") TiffImagePlugin.WRITE_LIBTIFF = False TiffImagePlugin.READ_LIBTIFF =", "4 12in16bit2.tif # imagemagick will auto scale so that a 12bit FFF is", "False # Assert self.assertEqual(im.size, (128, 128)) self.assertEqual(im.mode, \"L\") self.assert_image_similar(im, original, 7.3) def test_save_bytesio(self):", "print (im.getpixel((0, 2))) print (im2.getpixel((0, 0))) print (im2.getpixel((0, 1))) print (im2.getpixel((0, 2))) self.assert_image_equal(im,", "im.tobytes() # Bytes are in image native order (little endian) if py3: self.assertEqual(b[0],", "io.BytesIO() object. TiffImagePlugin.WRITE_LIBTIFF = True TiffImagePlugin.READ_LIBTIFF = True # Generate test image pilim", "data actually load im.load() im.getdata() try: self.assertEqual(im._compression, 'group4') except: print(\"No _compression\") print (dir(im))", "and save to compressed tif. 
from PIL import ImageFilter out = self.tempfile('temp.tif') im", "self.assertRaises(OSError, lambda: os.fstat(fn)) self.assertRaises(OSError, lambda: os.close(fn)) def test_multipage(self): # issue #862 TiffImagePlugin.READ_LIBTIFF =", "self.tempfile(\"temp.tif\") i.save(out, compression='group3') reread = Image.open(out) self.assertEqual(reread.info['compression'], 'group3') self.assert_image_equal(reread, i) def test_little_endian(self): im", "'rb') as f: im = Image.open(f) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_tiff_bytesio(self): \"\"\"Testing", "# Does the data actually load im.load() im.getdata() try: self.assertEqual(im._compression, 'group4') except: print(\"No", "original = img.tag.named() reloaded = loaded.tag.named() # PhotometricInterpretation is set from SAVE_INFO, #", "False def test__next(self): TiffImagePlugin.READ_LIBTIFF = True im = Image.open('Tests/images/hopper.tif') self.assertFalse(im.tag.next) im.load() self.assertFalse(im.tag.next) def", "that the saved image is the same as what we wrote\"\"\" file =", "b = im.tobytes() # Bytes are in image native order (big endian) if", "('tiff_adobe_deflate', (0, 0, 278, 374), 0)) im.load() def test_write_metadata(self): \"\"\" Test metadata writing", "load im.load() im.getdata() try: self.assertEqual(im._compression, 'group4') except: print(\"No _compression\") print (dir(im)) # can", "0)), (255, 0, 0)) self.assertTrue(im.tag.next) im.seek(2) self.assertFalse(im.tag.next) self.assertEqual(im.size, (20, 20)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0,", "\"\"\"Testing the string load path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" with open(file, 'rb') as f:", "a failure due to output on stderr from the error thrown by libtiff.", "the integer values are the same. 
im2 = Image.open('Tests/images/12in16bit.tif') if Image.DEBUG: print (im.getpixel((0,", "available\") def _assert_noerr(self, im): \"\"\"Helper tests that assert basic sanity about the g4", "'rb') as f: s.write(f.read()) s.seek(0) im = Image.open(s) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def", "test_g4_tiff(self): \"\"\"Test the ordinary file path load path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" im =", "original image. ignored = [ 'StripByteCounts', 'RowsPerStrip', 'PageNumber', 'PhotometricInterpretation'] for tag, value in", "500)) self._assert_noerr(im) def test_g4_eq_png(self): \"\"\" Checking that we're actually getting the data that", "getting the data that we expect\"\"\" png = Image.open('Tests/images/hopper_bw_500.png') g4 = Image.open('Tests/images/hopper_g4_500.tif') self.assert_image_equal(g4,", "# test case from irc, how to do blur on b/w image #", "Image.open(file) self.assertEqual(im.mode, \"RGB\") self.assertEqual(im.size, (278, 374)) self.assertEqual( im.tile[0][:3], ('tiff_adobe_deflate', (0, 0, 278, 374),", "1011 # Test TIFF saving to io.BytesIO() object. 
TiffImagePlugin.WRITE_LIBTIFF = True TiffImagePlugin.READ_LIBTIFF =", "(500, 500)) self._assert_noerr(reread) self.assert_image_equal(reread, rot) self.assertEqual(reread.info['compression'], 'group4') self.assertEqual(reread.info['compression'], orig.info['compression']) self.assertNotEqual(orig.tobytes(), reread.tobytes()) def test_adobe_deflate_tiff(self):", "native endian, so # on big endian, we'll get back mode = 'I;16B'", "defaults to writing in native endian, so # on big endian, we'll get", "file = \"Tests/images/hopper_g4_500.tif\" orig = Image.open(file) out = self.tempfile(\"temp.tif\") orig.tag[269] = 'temp.tif' orig.save(out)", "didn't roundtrip\" % tag) def test_g3_compression(self): i = Image.open('Tests/images/hopper_g4_500.tif') out = self.tempfile(\"temp.tif\") i.save(out,", "convert tmp.tif -evaluate RightShift 4 12in16bit2.tif # imagemagick will auto scale so that", "im = None # this should force even more closed. self.assertRaises(OSError, lambda: os.fstat(fn))", "orig = Image.open(file) out = self.tempfile(\"temp.tif\") rot = orig.transpose(Image.ROTATE_90) self.assertEqual(rot.size, (500, 500)) rot.save(out)", "lambda: os.close(fn)) def test_multipage(self): # issue #862 TiffImagePlugin.READ_LIBTIFF = True im = Image.open('Tests/images/multipage.tiff')", "Checking that we're actually getting the data that we expect\"\"\" png = Image.open('Tests/images/g4-fillorder-test.png')", "as what we wrote\"\"\" file = \"Tests/images/hopper_g4_500.tif\" orig = Image.open(file) out = self.tempfile(\"temp.tif\")", "#862 TiffImagePlugin.READ_LIBTIFF = True im = Image.open('Tests/images/multipage.tiff') # file is a multipage tiff,", "blur on b/w image # and save to compressed tif. 
from PIL import", "original[tag], value, \"%s didn't roundtrip\" % tag) for tag, value in original.items(): if", "10x10 green, 10x10 red, 20x20 blue im.seek(0) self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0,", "self.tempfile(\"temp.tif\") # out = \"temp.le.tif\" im.save(out) reread = Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)),", "0, 0)) self.assertTrue(im.tag.next) im.seek(2) self.assertFalse(im.tag.next) self.assertEqual(im.size, (20, 20)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 0, 255))", "ordinary file path load path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" im = Image.open(file) self.assertEqual(im.size, (500,", "value, \"%s didn't roundtrip\" % tag) for tag, value in original.items(): if tag", "# Bytes are in image native order (little endian) if py3: self.assertEqual(b[0], ord(b'\\xe0'))", "Checking that we're actually getting the data that we expect\"\"\" png = Image.open('Tests/images/hopper_bw_500.png')", "# Act TiffImagePlugin.READ_LIBTIFF = True im = Image.open(test_file) TiffImagePlugin.READ_LIBTIFF = False # Assert", "loaded = Image.open(f) original = img.tag.named() reloaded = loaded.tag.named() # PhotometricInterpretation is set", "on b/w image # and save to compressed tif. from PIL import ImageFilter", "Test TIFF saving to io.BytesIO() object. 
TiffImagePlugin.WRITE_LIBTIFF = True TiffImagePlugin.READ_LIBTIFF = True #", "see https://github.com/python-pillow/Pillow/issues/279 def test_g4_fillorder_eq_png(self): \"\"\" Checking that we're actually getting the data that", "500)) self._assert_noerr(reread) self.assert_image_equal(reread, rot) self.assertEqual(reread.info['compression'], 'group4') self.assertEqual(reread.info['compression'], orig.info['compression']) self.assertNotEqual(orig.tobytes(), reread.tobytes()) def test_adobe_deflate_tiff(self): file", "helper import unittest, PillowTestCase, hopper, py3 import os import io from PIL import", "order (big endian) if py3: self.assertEqual(b[0], ord(b'\\x01')) self.assertEqual(b[1], ord(b'\\xe0')) else: self.assertEqual(b[0], b'\\x01') self.assertEqual(b[1],", "(20, 20)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 0, 255)) TiffImagePlugin.READ_LIBTIFF = False def test__next(self): TiffImagePlugin.READ_LIBTIFF", "\"\"\"Tests String data in info directory\"\"\" file = \"Tests/images/hopper_g4_500.tif\" orig = Image.open(file) out", "im = Image.open(f) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_tiff_bytesio(self): \"\"\"Testing the stringio loading", "ord(b'\\x01')) self.assertEqual(b[1], ord(b'\\xe0')) else: self.assertEqual(b[0], b'\\x01') self.assertEqual(b[1], b'\\xe0') out = self.tempfile(\"temp.tif\") im.save(out) reread", "im = hopper('RGB') out = self.tempfile('temp.tif') self.assertRaises( IOError, lambda: im.save(out, compression='tiff_ccitt')) self.assertRaises(IOError, lambda:", "= Image.open(s) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_eq_png(self): \"\"\" Checking that we're actually", "[ 'StripByteCounts', 'RowsPerStrip', 'PageNumber', 'PhotometricInterpretation'] for tag, value in reloaded.items(): if tag not", "im = Image.open('Tests/images/16bit.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode, 'I;16') b = im.tobytes() # Bytes", "0, 
255)) TiffImagePlugin.READ_LIBTIFF = False def test__next(self): TiffImagePlugin.READ_LIBTIFF = True im = Image.open('Tests/images/hopper.tif')", "it. self.assertRaises(OSError, lambda: os.fstat(fn)) im = None # this should force even more", "Image.open('Tests/images/hopper_g4_500.tif') self.assert_image_equal(g4, png) # see https://github.com/python-pillow/Pillow/issues/279 def test_g4_fillorder_eq_png(self): \"\"\" Checking that we're actually", "but not now\"\"\" im = hopper('RGB') out = self.tempfile('temp.tif') self.assertRaises( IOError, lambda: im.save(out,", "reread = Image.open(out) self.assertEqual('temp.tif', reread.tag[269]) def test_12bit_rawmode(self): \"\"\" Are we generating the same", "= dir(Image.core) if \"libtiff_encoder\" not in codecs or \"libtiff_decoder\" not in codecs: self.skipTest(\"tiff", "# UNDONE - libtiff defaults to writing in native endian, so # on", "test__next(self): TiffImagePlugin.READ_LIBTIFF = True im = Image.open('Tests/images/hopper.tif') self.assertFalse(im.tag.next) im.load() self.assertFalse(im.tag.next) def test_4bit(self): #", "codecs: self.skipTest(\"tiff support not available\") def _assert_noerr(self, im): \"\"\"Helper tests that assert basic", "the same as what we wrote\"\"\" file = \"Tests/images/hopper_g4_500.tif\" orig = Image.open(file) out", "Image.open(test_file) TiffImagePlugin.READ_LIBTIFF = False # Assert self.assertEqual(im.size, (128, 128)) self.assertEqual(im.mode, \"L\") self.assert_image_similar(im, original,", "print (im.getpixel((0, 0))) print (im.getpixel((0, 1))) print (im.getpixel((0, 2))) print (im2.getpixel((0, 0))) print", "s.write(f.read()) s.seek(0) im = Image.open(s) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_eq_png(self): \"\"\" Checking", "\"\"\"Checking to see that the saved image is the same as what we", "im.tile[0][:3], ('tiff_adobe_deflate', (0, 0, 278, 374), 0)) im.load() def test_write_metadata(self): \"\"\" Test metadata", "metadata writing through 
libtiff \"\"\" img = Image.open('Tests/images/hopper_g4.tif') f = self.tempfile('temp.tiff') img.save(f, tiffinfo=img.tag)", "else: self.assertEqual(b[0], b'\\xe0') self.assertEqual(b[1], b'\\x01') out = self.tempfile(\"temp.tif\") # out = \"temp.le.tif\" im.save(out)", "# imagemagick will auto scale so that a 12bit FFF is 16bit FFF0,", "try: self.assertEqual(im._compression, 'group4') except: print(\"No _compression\") print (dir(im)) # can we write it", "# PhotometricInterpretation is set from SAVE_INFO, # not the original image. ignored =", "(im2.getpixel((0, 0))) print (im2.getpixel((0, 1))) print (im2.getpixel((0, 2))) self.assert_image_equal(im, im2) def test_blur(self): #", "from the error thrown by libtiff. We need to capture that but not", "not in codecs or \"libtiff_decoder\" not in codecs: self.skipTest(\"tiff support not available\") def", "Image.open('Tests/images/16bit.MM.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode, 'I;16B') b = im.tobytes() # Bytes are in", "255)) TiffImagePlugin.READ_LIBTIFF = False def test__next(self): TiffImagePlugin.READ_LIBTIFF = True im = Image.open('Tests/images/hopper.tif') self.assertFalse(im.tag.next)", "Image.open(file) out = self.tempfile(\"temp.tif\") rot = orig.transpose(Image.ROTATE_90) self.assertEqual(rot.size, (500, 500)) rot.save(out) reread =", "self.tempfile('temp.tif') im = Image.open('Tests/images/pport_g4.tif') im = im.convert('L') im = im.filter(ImageFilter.GaussianBlur(4)) im.save(out, compression='tiff_adobe_deflate') im2", "load path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" im = Image.open(file) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def", "im = Image.open(file) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_large(self): file = \"Tests/images/pport_g4.tif\" im", "= im.fp.fileno() os.fstat(fn) im.load() # this should close it. 
self.assertRaises(OSError, lambda: os.fstat(fn)) im", "else: self.assertEqual( value, reloaded[tag], \"%s didn't roundtrip\" % tag) def test_g3_compression(self): i =", "# and save to compressed tif. from PIL import ImageFilter out = self.tempfile('temp.tif')", "% tag) else: self.assertEqual( value, reloaded[tag], \"%s didn't roundtrip\" % tag) def test_g3_compression(self):", "Image.open(f) original = img.tag.named() reloaded = loaded.tag.named() # PhotometricInterpretation is set from SAVE_INFO,", "(im2.getpixel((0, 2))) self.assert_image_equal(im, im2) def test_blur(self): # test case from irc, how to", "info directory\"\"\" file = \"Tests/images/hopper_g4_500.tif\" orig = Image.open(file) out = self.tempfile(\"temp.tif\") orig.tag[269] =", "im.seek(2) self.assertFalse(im.tag.next) self.assertEqual(im.size, (20, 20)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 0, 255)) TiffImagePlugin.READ_LIBTIFF = False", "im = Image.open('Tests/images/hopper.tif') self.assertFalse(im.tag.next) im.load() self.assertFalse(im.tag.next) def test_4bit(self): # Arrange test_file = \"Tests/images/hopper_gray_4bpp.tif\"", "Image.open(buffer_io) self.assert_image_similar(pilim, pilim_load, 0) # save_bytesio() save_bytesio('raw') save_bytesio(\"packbits\") save_bytesio(\"tiff_lzw\") TiffImagePlugin.WRITE_LIBTIFF = False TiffImagePlugin.READ_LIBTIFF", "form. out = self.tempfile(\"temp.png\") im.save(out) class TestFileLibTiff(LibTiffTestCase): def test_g4_tiff(self): \"\"\"Test the ordinary file", "the image as Imagemagick is? 
\"\"\" TiffImagePlugin.READ_LIBTIFF = True # Image.DEBUG = True", "\"RGB\") self.assertEqual(im.size, (278, 374)) self.assertEqual( im.tile[0][:3], ('tiff_adobe_deflate', (0, 0, 278, 374), 0)) im.load()", "def test_g4_large(self): file = \"Tests/images/pport_g4.tif\" im = Image.open(file) self._assert_noerr(im) def test_g4_tiff_file(self): \"\"\"Testing the", "compression='tiff_adobe_deflate') im2 = Image.open(out) self.assert_image_equal(im, im2) def xtest_bw_compression_w_rgb(self): \"\"\" This test passes, but", "None # this should force even more closed. self.assertRaises(OSError, lambda: os.fstat(fn)) self.assertRaises(OSError, lambda:", "hopper() def save_bytesio(compression=None): buffer_io = io.BytesIO() pilim.save(buffer_io, format=\"tiff\", compression=compression) buffer_io.seek(0) pilim_load = Image.open(buffer_io)", "the same. im2 = Image.open('Tests/images/12in16bit.tif') if Image.DEBUG: print (im.getpixel((0, 0))) print (im.getpixel((0, 1)))", "= Image.open('Tests/images/hopper.tif') self.assertFalse(im.tag.next) im.load() self.assertFalse(im.tag.next) def test_4bit(self): # Arrange test_file = \"Tests/images/hopper_gray_4bpp.tif\" original", "b'\\x01') self.assertEqual(b[1], b'\\xe0') out = self.tempfile(\"temp.tif\") im.save(out) reread = Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0,", "how to do blur on b/w image # and save to compressed tif.", "= False # Assert self.assertEqual(im.size, (128, 128)) self.assertEqual(im.mode, \"L\") self.assert_image_similar(im, original, 7.3) def", "self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode, 'I;16') b = im.tobytes() # Bytes are in image", "= Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480) # UNDONE - libtiff defaults to", "that we expect\"\"\" png = Image.open('Tests/images/hopper_bw_500.png') g4 = 
Image.open('Tests/images/hopper_g4_500.tif') self.assert_image_equal(g4, png) # see", "we wrote\"\"\" file = \"Tests/images/hopper_g4_500.tif\" orig = Image.open(file) out = self.tempfile(\"temp.tif\") rot =", "self.assert_image_similar(im, original, 7.3) def test_save_bytesio(self): # PR 1011 # Test TIFF saving to", "TiffImagePlugin.READ_LIBTIFF = True # Image.DEBUG = True im = Image.open('Tests/images/12bit.cropped.tif') im.load() TiffImagePlugin.READ_LIBTIFF =", "file = \"Tests/images/hopper_g4_500.tif\" im = Image.open(file) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_large(self): file", "self.assertFalse(im.tag.next) self.assertEqual(im.size, (20, 20)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 0, 255)) TiffImagePlugin.READ_LIBTIFF = False def", "image is the same as what we wrote\"\"\" file = \"Tests/images/hopper_g4_500.tif\" orig =", "running all tests causes a failure due to output on stderr from the", "im.save(out) class TestFileLibTiff(LibTiffTestCase): def test_g4_tiff(self): \"\"\"Test the ordinary file path load path\"\"\" file", "480) # UNDONE - libtiff defaults to writing in native endian, so #", "\"Tests/images/hopper_gray_4bpp.tif\" original = hopper(\"L\") # Act TiffImagePlugin.READ_LIBTIFF = True im = Image.open(test_file) TiffImagePlugin.READ_LIBTIFF", "0)), 480) def test_g4_string_info(self): \"\"\"Tests String data in info directory\"\"\" file = \"Tests/images/hopper_g4_500.tif\"", "1))) print (im.getpixel((0, 2))) print (im2.getpixel((0, 0))) print (im2.getpixel((0, 1))) print (im2.getpixel((0, 2)))", "on big endian, we'll get back mode = 'I;16B' here. def test_big_endian(self): im", "= False # to make the target -- # convert 12bit.cropped.tif -depth 16", "fn = im.fp.fileno() os.fstat(fn) im.load() # this should close it. 
self.assertRaises(OSError, lambda: os.fstat(fn))", "= hopper('RGB') out = self.tempfile('temp.tif') self.assertRaises( IOError, lambda: im.save(out, compression='tiff_ccitt')) self.assertRaises(IOError, lambda: im.save(out,", "support not available\") def _assert_noerr(self, im): \"\"\"Helper tests that assert basic sanity about", "= im.tobytes() # Bytes are in image native order (big endian) if py3:", "self.assert_image_equal(im, im2) im.save(out, compression='jpeg') im2 = Image.open(out) self.assert_image_similar(im, im2, 30) def test_cmyk_save(self): im", "original.items(): if tag not in ignored: if tag.endswith('Resolution'): val = reloaded[tag] self.assert_almost_equal( val[0][0]/val[0][1],", "def test_g4_string_info(self): \"\"\"Tests String data in info directory\"\"\" file = \"Tests/images/hopper_g4_500.tif\" orig =", "self.skipTest(\"tiff support not available\") def _assert_noerr(self, im): \"\"\"Helper tests that assert basic sanity", "py3 import os import io from PIL import Image, TiffImagePlugin class LibTiffTestCase(PillowTestCase): def", "pilim.save(buffer_io, format=\"tiff\", compression=compression) buffer_io.seek(0) pilim_load = Image.open(buffer_io) self.assert_image_similar(pilim, pilim_load, 0) # save_bytesio() save_bytesio('raw')", "should close it. 
self.assertRaises(OSError, lambda: os.fstat(fn)) im = None # this should force", "saved image is the same as what we wrote\"\"\" file = \"Tests/images/hopper_g4_500.tif\" orig", "test_g4_eq_png(self): \"\"\" Checking that we're actually getting the data that we expect\"\"\" png", "self.assertEqual('temp.tif', reread.tag[269]) def test_12bit_rawmode(self): \"\"\" Are we generating the same interpretation of the", "= Image.open(test_file) TiffImagePlugin.READ_LIBTIFF = False # Assert self.assertEqual(im.size, (128, 128)) self.assertEqual(im.mode, \"L\") self.assert_image_similar(im,", "image pilim = hopper() def save_bytesio(compression=None): buffer_io = io.BytesIO() pilim.save(buffer_io, format=\"tiff\", compression=compression) buffer_io.seek(0)", "through libtiff \"\"\" img = Image.open('Tests/images/hopper_g4.tif') f = self.tempfile('temp.tiff') img.save(f, tiffinfo=img.tag) loaded =", "(dir(im)) # can we write it back out, in a different form. out", "# this should force even more closed. self.assertRaises(OSError, lambda: os.fstat(fn)) self.assertRaises(OSError, lambda: os.close(fn))", "tiff reading\"\"\" # 1 bit self.assertEqual(im.mode, \"1\") # Does the data actually load", "TiffImagePlugin class LibTiffTestCase(PillowTestCase): def setUp(self): codecs = dir(Image.core) if \"libtiff_encoder\" not in codecs", "the stringio loading code path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" s = io.BytesIO() with open(file,", "self.assertEqual(im.convert('RGB').getpixel((0, 0)), (255, 0, 0)) self.assertTrue(im.tag.next) im.seek(2) self.assertFalse(im.tag.next) self.assertEqual(im.size, (20, 20)) self.assertEqual(im.convert('RGB').getpixel((0, 0)),", "im.filter(ImageFilter.GaussianBlur(4)) im.save(out, compression='tiff_adobe_deflate') im2 = Image.open(out) im2.load() self.assert_image_equal(im, im2) def test_compressions(self): im =", "# Test TIFF saving to io.BytesIO() object. 
TiffImagePlugin.WRITE_LIBTIFF = True TiffImagePlugin.READ_LIBTIFF = True", "= self.tempfile(\"temp.png\") im.save(out) class TestFileLibTiff(LibTiffTestCase): def test_g4_tiff(self): \"\"\"Test the ordinary file path load", "self._assert_noerr(im) def test_g4_tiff_bytesio(self): \"\"\"Testing the stringio loading code path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" s", "= hopper('RGB') out = self.tempfile('temp.tif') for compression in ('packbits', 'tiff_lzw'): im.save(out, compression=compression) im2", "= True im = Image.open('Tests/images/12bit.cropped.tif') im.load() TiffImagePlugin.READ_LIBTIFF = False # to make the", "ord(b'\\xe0')) self.assertEqual(b[1], ord(b'\\x01')) else: self.assertEqual(b[0], b'\\xe0') self.assertEqual(b[1], b'\\x01') out = self.tempfile(\"temp.tif\") # out", "msg=\"%s didn't roundtrip\" % tag) else: self.assertEqual( original[tag], value, \"%s didn't roundtrip\" %", "= True im = Image.open('Tests/images/multipage.tiff') # file is a multipage tiff, 10x10 green,", "% tag) def test_g3_compression(self): i = Image.open('Tests/images/hopper_g4_500.tif') out = self.tempfile(\"temp.tif\") i.save(out, compression='group3') reread", "reloaded[tag], \"%s didn't roundtrip\" % tag) def test_g3_compression(self): i = Image.open('Tests/images/hopper_g4_500.tif') out =", "= \"Tests/images/hopper_g4_500.tif\" im = Image.open(file) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_large(self): file =", "= False TiffImagePlugin.READ_LIBTIFF = False if __name__ == '__main__': unittest.main() # End of", "open(file, 'rb') as f: s.write(f.read()) s.seek(0) im = Image.open(s) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im)", "self.assertEqual( im.tile[0][:3], ('tiff_adobe_deflate', (0, 0, 278, 374), 0)) im.load() def test_write_metadata(self): \"\"\" Test", "0)), 480) self.assertEqual(im.mode, 'I;16') b = im.tobytes() # Bytes are in image native", "1))) print (im2.getpixel((0, 2))) self.assert_image_equal(im, im2) def 
test_blur(self): # test case from irc,", "-depth 16 tmp.tif # convert tmp.tif -evaluate RightShift 4 12in16bit2.tif # imagemagick will", "original = hopper(\"L\") # Act TiffImagePlugin.READ_LIBTIFF = True im = Image.open(test_file) TiffImagePlugin.READ_LIBTIFF =", "native order (big endian) if py3: self.assertEqual(b[0], ord(b'\\x01')) self.assertEqual(b[1], ord(b'\\xe0')) else: self.assertEqual(b[0], b'\\x01')", "def test_4bit(self): # Arrange test_file = \"Tests/images/hopper_gray_4bpp.tif\" original = hopper(\"L\") # Act TiffImagePlugin.READ_LIBTIFF", "\"Tests/images/hopper_g4_500.tif\" orig = Image.open(file) out = self.tempfile(\"temp.tif\") rot = orig.transpose(Image.ROTATE_90) self.assertEqual(rot.size, (500, 500))", "import Image, TiffImagePlugin class LibTiffTestCase(PillowTestCase): def setUp(self): codecs = dir(Image.core) if \"libtiff_encoder\" not", "# issue #862 TiffImagePlugin.READ_LIBTIFF = True im = Image.open('Tests/images/multipage.tiff') # file is a", "the g4 tiff reading\"\"\" # 1 bit self.assertEqual(im.mode, \"1\") # Does the data", "'group4') self.assertEqual(reread.info['compression'], orig.info['compression']) self.assertNotEqual(orig.tobytes(), reread.tobytes()) def test_adobe_deflate_tiff(self): file = \"Tests/images/tiff_adobe_deflate.tif\" im = Image.open(file)", "the data that we expect\"\"\" png = Image.open('Tests/images/hopper_bw_500.png') g4 = Image.open('Tests/images/hopper_g4_500.tif') self.assert_image_equal(g4, png)", "ignored = [ 'StripByteCounts', 'RowsPerStrip', 'PageNumber', 'PhotometricInterpretation'] for tag, value in reloaded.items(): if", "tmp.tif -evaluate RightShift 4 12in16bit2.tif # imagemagick will auto scale so that a", "wrote\"\"\" file = \"Tests/images/hopper_g4_500.tif\" orig = Image.open(file) out = self.tempfile(\"temp.tif\") rot = orig.transpose(Image.ROTATE_90)", "print(\"No _compression\") print (dir(im)) # can we write it back out, in a", "self.assertTrue(im.tag.next) im.seek(2) 
self.assertFalse(im.tag.next) self.assertEqual(im.size, (20, 20)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 0, 255)) TiffImagePlugin.READ_LIBTIFF =", "the error thrown by libtiff. We need to capture that but not now\"\"\"", "self.assertEqual(im.size, (20, 20)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 0, 255)) TiffImagePlugin.READ_LIBTIFF = False def test__next(self):", "278, 374), 0)) im.load() def test_write_metadata(self): \"\"\" Test metadata writing through libtiff \"\"\"", "test_blur(self): # test case from irc, how to do blur on b/w image", "(255, 0, 0)) self.assertTrue(im.tag.next) im.seek(2) self.assertFalse(im.tag.next) self.assertEqual(im.size, (20, 20)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 0,", "im.save(out, compression='jpeg') im2 = Image.open(out) self.assert_image_similar(im, im2, 30) def test_cmyk_save(self): im = hopper('CMYK')", "stderr from the error thrown by libtiff. We need to capture that but", "10x10 red, 20x20 blue im.seek(0) self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 128, 0))", "more closed. 
self.assertRaises(OSError, lambda: os.fstat(fn)) self.assertRaises(OSError, lambda: os.close(fn)) def test_multipage(self): # issue #862", "= Image.open('Tests/images/hopper_bw_500.png') g4 = Image.open('Tests/images/hopper_g4_500.tif') self.assert_image_equal(g4, png) # see https://github.com/python-pillow/Pillow/issues/279 def test_g4_fillorder_eq_png(self): \"\"\"", "val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't roundtrip\" % tag) else: self.assertEqual( value, reloaded[tag], \"%s didn't", "bit self.assertEqual(im.mode, \"1\") # Does the data actually load im.load() im.getdata() try: self.assertEqual(im._compression,", "Does the data actually load im.load() im.getdata() try: self.assertEqual(im._compression, 'group4') except: print(\"No _compression\")", "\"%s didn't roundtrip\" % tag) for tag, value in original.items(): if tag not", "# can we write it back out, in a different form. out =", "test_g3_compression(self): i = Image.open('Tests/images/hopper_g4_500.tif') out = self.tempfile(\"temp.tif\") i.save(out, compression='group3') reread = Image.open(out) self.assertEqual(reread.info['compression'],", "TiffImagePlugin.READ_LIBTIFF = True im = Image.open(test_file) TiffImagePlugin.READ_LIBTIFF = False # Assert self.assertEqual(im.size, (128,", "def setUp(self): codecs = dir(Image.core) if \"libtiff_encoder\" not in codecs or \"libtiff_decoder\" not", "im): \"\"\"Helper tests that assert basic sanity about the g4 tiff reading\"\"\" #", "libtiff \"\"\" img = Image.open('Tests/images/hopper_g4.tif') f = self.tempfile('temp.tiff') img.save(f, tiffinfo=img.tag) loaded = Image.open(f)", "buffer_io = io.BytesIO() pilim.save(buffer_io, format=\"tiff\", compression=compression) buffer_io.seek(0) pilim_load = Image.open(buffer_io) self.assert_image_similar(pilim, pilim_load, 0)", "im2 = Image.open('Tests/images/12in16bit.tif') if Image.DEBUG: print (im.getpixel((0, 0))) print (im.getpixel((0, 1))) print (im.getpixel((0,", "save_bytesio(compression=None): 
buffer_io = io.BytesIO() pilim.save(buffer_io, format=\"tiff\", compression=compression) buffer_io.seek(0) pilim_load = Image.open(buffer_io) self.assert_image_similar(pilim, pilim_load,", "Image.open(out) self.assert_image_equal(im, im2) def xtest_bw_compression_w_rgb(self): \"\"\" This test passes, but when running all", "out = self.tempfile('temp.tif') self.assertRaises( IOError, lambda: im.save(out, compression='tiff_ccitt')) self.assertRaises(IOError, lambda: im.save(out, compression='group3')) self.assertRaises(IOError,", "TestFileLibTiff(LibTiffTestCase): def test_g4_tiff(self): \"\"\"Test the ordinary file path load path\"\"\" file = \"Tests/images/hopper_g4_500.tif\"", "\"\"\" TiffImagePlugin.READ_LIBTIFF = True # Image.DEBUG = True im = Image.open('Tests/images/12bit.cropped.tif') im.load() TiffImagePlugin.READ_LIBTIFF", "in ('packbits', 'tiff_lzw'): im.save(out, compression=compression) im2 = Image.open(out) self.assert_image_equal(im, im2) im.save(out, compression='jpeg') im2", "print (dir(im)) # can we write it back out, in a different form.", "so that the integer values are the same. 
im2 = Image.open('Tests/images/12in16bit.tif') if Image.DEBUG:", "= \"Tests/images/hopper_g4_500.tif\" orig = Image.open(file) out = self.tempfile(\"temp.tif\") rot = orig.transpose(Image.ROTATE_90) self.assertEqual(rot.size, (500,", "Image.open(out) self.assertEqual(reread.info['compression'], 'group3') self.assert_image_equal(reread, i) def test_little_endian(self): im = Image.open('Tests/images/16bit.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480)", "if py3: self.assertEqual(b[0], ord(b'\\xe0')) self.assertEqual(b[1], ord(b'\\x01')) else: self.assertEqual(b[0], b'\\xe0') self.assertEqual(b[1], b'\\x01') out =", "path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" with open(file, 'rb') as f: im = Image.open(f) self.assertEqual(im.size,", "b'\\xe0') out = self.tempfile(\"temp.tif\") im.save(out) reread = Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480)", "= Image.open('Tests/images/hopper_g4.tif') f = self.tempfile('temp.tiff') img.save(f, tiffinfo=img.tag) loaded = Image.open(f) original = img.tag.named()", "im = Image.open('Tests/images/12bit.cropped.tif') im.load() TiffImagePlugin.READ_LIBTIFF = False # to make the target --", "= Image.open('Tests/images/12bit.cropped.tif') im.load() TiffImagePlugin.READ_LIBTIFF = False # to make the target -- #", "def test_fp_leak(self): im = Image.open(\"Tests/images/hopper_g4_500.tif\") fn = im.fp.fileno() os.fstat(fn) im.load() # this should", "PR 1011 # Test TIFF saving to io.BytesIO() object. 
TiffImagePlugin.WRITE_LIBTIFF = True TiffImagePlugin.READ_LIBTIFF", "ignored: if tag.endswith('Resolution'): val = reloaded[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't roundtrip\" %", "Act TiffImagePlugin.READ_LIBTIFF = True im = Image.open(test_file) TiffImagePlugin.READ_LIBTIFF = False # Assert self.assertEqual(im.size,", "orig.transpose(Image.ROTATE_90) self.assertEqual(rot.size, (500, 500)) rot.save(out) reread = Image.open(out) self.assertEqual(reread.size, (500, 500)) self._assert_noerr(reread) self.assert_image_equal(reread,", "= Image.open('Tests/images/multipage.tiff') # file is a multipage tiff, 10x10 green, 10x10 red, 20x20", "self.assertEqual(b[0], ord(b'\\xe0')) self.assertEqual(b[1], ord(b'\\x01')) else: self.assertEqual(b[0], b'\\xe0') self.assertEqual(b[1], b'\\x01') out = self.tempfile(\"temp.tif\") #", "self.assert_image_equal(g4, png) # see https://github.com/python-pillow/Pillow/issues/279 def test_g4_fillorder_eq_png(self): \"\"\" Checking that we're actually getting", "Image.open(out) im2.load() self.assert_image_equal(im, im2) def test_compressions(self): im = hopper('RGB') out = self.tempfile('temp.tif') for", "g4 tiff reading\"\"\" # 1 bit self.assertEqual(im.mode, \"1\") # Does the data actually", "= Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480) def test_g4_string_info(self): \"\"\"Tests String data in", "self.assertEqual(reread.info['compression'], orig.info['compression']) self.assertNotEqual(orig.tobytes(), reread.tobytes()) def test_adobe_deflate_tiff(self): file = \"Tests/images/tiff_adobe_deflate.tif\" im = Image.open(file) self.assertEqual(im.mode,", "close it. 
self.assertRaises(OSError, lambda: os.fstat(fn)) im = None # this should force even", "in image native order (little endian) if py3: self.assertEqual(b[0], ord(b'\\xe0')) self.assertEqual(b[1], ord(b'\\x01')) else:", "we write it back out, in a different form. out = self.tempfile(\"temp.png\") im.save(out)", "lambda: os.fstat(fn)) self.assertRaises(OSError, lambda: os.close(fn)) def test_multipage(self): # issue #862 TiffImagePlugin.READ_LIBTIFF = True", "not in codecs: self.skipTest(\"tiff support not available\") def _assert_noerr(self, im): \"\"\"Helper tests that", "= self.tempfile('temp.tif') for compression in ('packbits', 'tiff_lzw'): im.save(out, compression=compression) im2 = Image.open(out) self.assert_image_equal(im,", "a multipage tiff, 10x10 green, 10x10 red, 20x20 blue im.seek(0) self.assertEqual(im.size, (10, 10))", "\"\"\" This test passes, but when running all tests causes a failure due", "value, reloaded[tag], \"%s didn't roundtrip\" % tag) def test_g3_compression(self): i = Image.open('Tests/images/hopper_g4_500.tif') out", "im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480) def test_g4_string_info(self): \"\"\"Tests String data in info directory\"\"\" file", "self.assertEqual(b[1], b'\\x01') out = self.tempfile(\"temp.tif\") # out = \"temp.le.tif\" im.save(out) reread = Image.open(out)", "Image.open('Tests/images/12in16bit.tif') if Image.DEBUG: print (im.getpixel((0, 0))) print (im.getpixel((0, 1))) print (im.getpixel((0, 2))) print", "TiffImagePlugin.WRITE_LIBTIFF = True TiffImagePlugin.READ_LIBTIFF = True # Generate test image pilim = hopper()", "False # to make the target -- # convert 12bit.cropped.tif -depth 16 tmp.tif", "self.assertRaises(OSError, lambda: os.fstat(fn)) im = None # this should force even more closed.", "# Generate test image pilim = hopper() def save_bytesio(compression=None): buffer_io = io.BytesIO() pilim.save(buffer_io,", "0)), 480) self.assertEqual(im.mode, 'I;16B') b = im.tobytes() # Bytes are in 
image native", "= True # Generate test image pilim = hopper() def save_bytesio(compression=None): buffer_io =", "py3: self.assertEqual(b[0], ord(b'\\xe0')) self.assertEqual(b[1], ord(b'\\x01')) else: self.assertEqual(b[0], b'\\xe0') self.assertEqual(b[1], b'\\x01') out = self.tempfile(\"temp.tif\")", "500)) self._assert_noerr(im) def test_g4_tiff_bytesio(self): \"\"\"Testing the stringio loading code path\"\"\" file = \"Tests/images/hopper_g4_500.tif\"", "orig.info['compression']) self.assertNotEqual(orig.tobytes(), reread.tobytes()) def test_adobe_deflate_tiff(self): file = \"Tests/images/tiff_adobe_deflate.tif\" im = Image.open(file) self.assertEqual(im.mode, \"RGB\")", "thrown by libtiff. We need to capture that but not now\"\"\" im =", "= Image.open(file) self._assert_noerr(im) def test_g4_tiff_file(self): \"\"\"Testing the string load path\"\"\" file = \"Tests/images/hopper_g4_500.tif\"", "FFF is 16bit FFF0, # so we need to unshift so that the", "image native order (big endian) if py3: self.assertEqual(b[0], ord(b'\\x01')) self.assertEqual(b[1], ord(b'\\xe0')) else: self.assertEqual(b[0],", "self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 128, 0)) self.assertTrue(im.tag.next) im.seek(1) self.assertEqual(im.size, (10, 10))", "Imagemagick is? \"\"\" TiffImagePlugin.READ_LIBTIFF = True # Image.DEBUG = True im = Image.open('Tests/images/12bit.cropped.tif')", "\"L\") self.assert_image_similar(im, original, 7.3) def test_save_bytesio(self): # PR 1011 # Test TIFF saving", "\"\"\" Are we generating the same interpretation of the image as Imagemagick is?", "= Image.open('Tests/images/16bit.MM.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode, 'I;16B') b = im.tobytes() # Bytes are", "out, in a different form. 
out = self.tempfile(\"temp.png\") im.save(out) class TestFileLibTiff(LibTiffTestCase): def test_g4_tiff(self):", "# save_bytesio() save_bytesio('raw') save_bytesio(\"packbits\") save_bytesio(\"tiff_lzw\") TiffImagePlugin.WRITE_LIBTIFF = False TiffImagePlugin.READ_LIBTIFF = False if __name__", "file = \"Tests/images/hopper_g4_500.tif\" with open(file, 'rb') as f: im = Image.open(f) self.assertEqual(im.size, (500,", "480) self.assertEqual(im.mode, 'I;16B') b = im.tobytes() # Bytes are in image native order", "def test_g4_tiff_bytesio(self): \"\"\"Testing the stringio loading code path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" s =", "imagemagick will auto scale so that a 12bit FFF is 16bit FFF0, #", "path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" im = Image.open(file) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_large(self):", "that the integer values are the same. im2 = Image.open('Tests/images/12in16bit.tif') if Image.DEBUG: print", "im.save(out, compression='tiff_ccitt')) self.assertRaises(IOError, lambda: im.save(out, compression='group3')) self.assertRaises(IOError, lambda: im.save(out, compression='group4')) def test_fp_leak(self): im", "self.assertEqual(b[0], ord(b'\\x01')) self.assertEqual(b[1], ord(b'\\xe0')) else: self.assertEqual(b[0], b'\\x01') self.assertEqual(b[1], b'\\xe0') out = self.tempfile(\"temp.tif\") im.save(out)", "that we expect\"\"\" png = Image.open('Tests/images/g4-fillorder-test.png') g4 = Image.open('Tests/images/g4-fillorder-test.tif') self.assert_image_equal(g4, png) def test_g4_write(self):", "\"\"\" Test metadata writing through libtiff \"\"\" img = Image.open('Tests/images/hopper_g4.tif') f = self.tempfile('temp.tiff')", "out = self.tempfile(\"temp.png\") im.save(out) class TestFileLibTiff(LibTiffTestCase): def test_g4_tiff(self): \"\"\"Test the ordinary file path", "im = Image.open(s) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_eq_png(self): \"\"\" Checking that 
we're", "- libtiff defaults to writing in native endian, so # on big endian,", "passes, but when running all tests causes a failure due to output on", "self.assert_image_equal(g4, png) def test_g4_write(self): \"\"\"Checking to see that the saved image is the", "rot.save(out) reread = Image.open(out) self.assertEqual(reread.size, (500, 500)) self._assert_noerr(reread) self.assert_image_equal(reread, rot) self.assertEqual(reread.info['compression'], 'group4') self.assertEqual(reread.info['compression'],", "with open(file, 'rb') as f: im = Image.open(f) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def", "(im.getpixel((0, 0))) print (im.getpixel((0, 1))) print (im.getpixel((0, 2))) print (im2.getpixel((0, 0))) print (im2.getpixel((0,", "= Image.open('Tests/images/g4-fillorder-test.tif') self.assert_image_equal(g4, png) def test_g4_write(self): \"\"\"Checking to see that the saved image", "get back mode = 'I;16B' here. def test_big_endian(self): im = Image.open('Tests/images/16bit.MM.deflate.tif') self.assertEqual(im.getpixel((0, 0)),", "b = im.tobytes() # Bytes are in image native order (little endian) if", "(im.getpixel((0, 2))) print (im2.getpixel((0, 0))) print (im2.getpixel((0, 1))) print (im2.getpixel((0, 2))) self.assert_image_equal(im, im2)", "(im2.getpixel((0, 1))) print (im2.getpixel((0, 2))) self.assert_image_equal(im, im2) def test_blur(self): # test case from", "self.assertEqual(im.mode, \"1\") # Does the data actually load im.load() im.getdata() try: self.assertEqual(im._compression, 'group4')", "PhotometricInterpretation is set from SAVE_INFO, # not the original image. 
ignored = [", "Image.open(file) out = self.tempfile(\"temp.tif\") orig.tag[269] = 'temp.tif' orig.save(out) reread = Image.open(out) self.assertEqual('temp.tif', reread.tag[269])", "are in image native order (big endian) if py3: self.assertEqual(b[0], ord(b'\\x01')) self.assertEqual(b[1], ord(b'\\xe0'))", "def test_save_bytesio(self): # PR 1011 # Test TIFF saving to io.BytesIO() object. TiffImagePlugin.WRITE_LIBTIFF", "= self.tempfile(\"temp.tif\") # out = \"temp.le.tif\" im.save(out) reread = Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0,", "os.fstat(fn)) self.assertRaises(OSError, lambda: os.close(fn)) def test_multipage(self): # issue #862 TiffImagePlugin.READ_LIBTIFF = True im", "for tag, value in reloaded.items(): if tag not in ignored: if tag.endswith('Resolution'): val", "374), 0)) im.load() def test_write_metadata(self): \"\"\" Test metadata writing through libtiff \"\"\" img", "g4 = Image.open('Tests/images/g4-fillorder-test.tif') self.assert_image_equal(g4, png) def test_g4_write(self): \"\"\"Checking to see that the saved", "do blur on b/w image # and save to compressed tif. 
from PIL", "io.BytesIO() pilim.save(buffer_io, format=\"tiff\", compression=compression) buffer_io.seek(0) pilim_load = Image.open(buffer_io) self.assert_image_similar(pilim, pilim_load, 0) # save_bytesio()", "(500, 500)) self._assert_noerr(im) def test_g4_eq_png(self): \"\"\" Checking that we're actually getting the data", "we're actually getting the data that we expect\"\"\" png = Image.open('Tests/images/g4-fillorder-test.png') g4 =", "didn't roundtrip\" % tag) else: self.assertEqual( value, reloaded[tag], \"%s didn't roundtrip\" % tag)", "from PIL import ImageFilter out = self.tempfile('temp.tif') im = Image.open('Tests/images/pport_g4.tif') im = im.convert('L')", "for tag, value in original.items(): if tag not in ignored: if tag.endswith('Resolution'): val", "im = Image.open('Tests/images/pport_g4.tif') im = im.convert('L') im = im.filter(ImageFilter.GaussianBlur(4)) im.save(out, compression='tiff_adobe_deflate') im2 =", "self._assert_noerr(im) def test_g4_large(self): file = \"Tests/images/pport_g4.tif\" im = Image.open(file) self._assert_noerr(im) def test_g4_tiff_file(self): \"\"\"Testing", "Image.DEBUG = True im = Image.open('Tests/images/12bit.cropped.tif') im.load() TiffImagePlugin.READ_LIBTIFF = False # to make", "im.save(out, compression='group4')) def test_fp_leak(self): im = Image.open(\"Tests/images/hopper_g4_500.tif\") fn = im.fp.fileno() os.fstat(fn) im.load() #", "= True # Image.DEBUG = True im = Image.open('Tests/images/12bit.cropped.tif') im.load() TiffImagePlugin.READ_LIBTIFF = False", "self._assert_noerr(im) def test_g4_eq_png(self): \"\"\" Checking that we're actually getting the data that we", "loading code path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" s = io.BytesIO() with open(file, 'rb') as", "16 tmp.tif # convert tmp.tif -evaluate RightShift 4 12in16bit2.tif # imagemagick will auto", "test passes, but when running all tests causes a failure due to output", "compression in ('packbits', 'tiff_lzw'): im.save(out, 
compression=compression) im2 = Image.open(out) self.assert_image_equal(im, im2) im.save(out, compression='jpeg')", "self.assert_image_equal(im, im2) def xtest_bw_compression_w_rgb(self): \"\"\" This test passes, but when running all tests", "from irc, how to do blur on b/w image # and save to", "value in original.items(): if tag not in ignored: if tag.endswith('Resolution'): val = reloaded[tag]", "im.save(out, compression='tiff_adobe_deflate') im2 = Image.open(out) im2.load() self.assert_image_equal(im, im2) def test_compressions(self): im = hopper('RGB')", "actually getting the data that we expect\"\"\" png = Image.open('Tests/images/hopper_bw_500.png') g4 = Image.open('Tests/images/hopper_g4_500.tif')", "reread = Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480) def test_g4_string_info(self): \"\"\"Tests String data", "Image.open('Tests/images/hopper.tif') self.assertFalse(im.tag.next) im.load() self.assertFalse(im.tag.next) def test_4bit(self): # Arrange test_file = \"Tests/images/hopper_gray_4bpp.tif\" original =", "to do blur on b/w image # and save to compressed tif. 
from", "tag, value in reloaded.items(): if tag not in ignored: if tag.endswith('Resolution'): val =", "im.tobytes() # Bytes are in image native order (big endian) if py3: self.assertEqual(b[0],", "hopper('RGB') out = self.tempfile('temp.tif') self.assertRaises( IOError, lambda: im.save(out, compression='tiff_ccitt')) self.assertRaises(IOError, lambda: im.save(out, compression='group3'))", "data in info directory\"\"\" file = \"Tests/images/hopper_g4_500.tif\" orig = Image.open(file) out = self.tempfile(\"temp.tif\")", "= Image.open(file) out = self.tempfile(\"temp.tif\") rot = orig.transpose(Image.ROTATE_90) self.assertEqual(rot.size, (500, 500)) rot.save(out) reread", "roundtrip\" % tag) else: self.assertEqual( original[tag], value, \"%s didn't roundtrip\" % tag) for", "file is a multipage tiff, 10x10 green, 10x10 red, 20x20 blue im.seek(0) self.assertEqual(im.size,", "self.assert_image_equal(reread, rot) self.assertEqual(reread.info['compression'], 'group4') self.assertEqual(reread.info['compression'], orig.info['compression']) self.assertNotEqual(orig.tobytes(), reread.tobytes()) def test_adobe_deflate_tiff(self): file = \"Tests/images/tiff_adobe_deflate.tif\"", "# this should close it. self.assertRaises(OSError, lambda: os.fstat(fn)) im = None # this", "TiffImagePlugin.READ_LIBTIFF = False # Assert self.assertEqual(im.size, (128, 128)) self.assertEqual(im.mode, \"L\") self.assert_image_similar(im, original, 7.3)", "to io.BytesIO() object. 
TiffImagePlugin.WRITE_LIBTIFF = True TiffImagePlugin.READ_LIBTIFF = True # Generate test image", "(500, 500)) rot.save(out) reread = Image.open(out) self.assertEqual(reread.size, (500, 500)) self._assert_noerr(reread) self.assert_image_equal(reread, rot) self.assertEqual(reread.info['compression'],", "Image.open(file) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_large(self): file = \"Tests/images/pport_g4.tif\" im = Image.open(file)", "\"Tests/images/tiff_adobe_deflate.tif\" im = Image.open(file) self.assertEqual(im.mode, \"RGB\") self.assertEqual(im.size, (278, 374)) self.assertEqual( im.tile[0][:3], ('tiff_adobe_deflate', (0,", "ImageFilter out = self.tempfile('temp.tif') im = Image.open('Tests/images/pport_g4.tif') im = im.convert('L') im = im.filter(ImageFilter.GaussianBlur(4))", "im.load() self.assertFalse(im.tag.next) def test_4bit(self): # Arrange test_file = \"Tests/images/hopper_gray_4bpp.tif\" original = hopper(\"L\") #", "self.assertEqual(b[1], ord(b'\\x01')) else: self.assertEqual(b[0], b'\\xe0') self.assertEqual(b[1], b'\\x01') out = self.tempfile(\"temp.tif\") # out =", "test_save_bytesio(self): # PR 1011 # Test TIFF saving to io.BytesIO() object. TiffImagePlugin.WRITE_LIBTIFF =", "see that the saved image is the same as what we wrote\"\"\" file", "writing through libtiff \"\"\" img = Image.open('Tests/images/hopper_g4.tif') f = self.tempfile('temp.tiff') img.save(f, tiffinfo=img.tag) loaded", "test_big_endian(self): im = Image.open('Tests/images/16bit.MM.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode, 'I;16B') b = im.tobytes() #", "directory\"\"\" file = \"Tests/images/hopper_g4_500.tif\" orig = Image.open(file) out = self.tempfile(\"temp.tif\") orig.tag[269] = 'temp.tif'", "error thrown by libtiff. We need to capture that but not now\"\"\" im", "back mode = 'I;16B' here. 
def test_big_endian(self): im = Image.open('Tests/images/16bit.MM.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480)", "lambda: os.fstat(fn)) im = None # this should force even more closed. self.assertRaises(OSError,", "reread.tobytes()) def test_adobe_deflate_tiff(self): file = \"Tests/images/tiff_adobe_deflate.tif\" im = Image.open(file) self.assertEqual(im.mode, \"RGB\") self.assertEqual(im.size, (278,", "= orig.transpose(Image.ROTATE_90) self.assertEqual(rot.size, (500, 500)) rot.save(out) reread = Image.open(out) self.assertEqual(reread.size, (500, 500)) self._assert_noerr(reread)", "unshift so that the integer values are the same. im2 = Image.open('Tests/images/12in16bit.tif') if", "reread = Image.open(out) self.assertEqual(reread.size, (500, 500)) self._assert_noerr(reread) self.assert_image_equal(reread, rot) self.assertEqual(reread.info['compression'], 'group4') self.assertEqual(reread.info['compression'], orig.info['compression'])", "TiffImagePlugin.READ_LIBTIFF = True # Generate test image pilim = hopper() def save_bytesio(compression=None): buffer_io", "= Image.open(out) self.assertEqual(reread.info['compression'], 'group3') self.assert_image_equal(reread, i) def test_little_endian(self): im = Image.open('Tests/images/16bit.deflate.tif') self.assertEqual(im.getpixel((0, 0)),", "= img.tag.named() reloaded = loaded.tag.named() # PhotometricInterpretation is set from SAVE_INFO, # not", "in reloaded.items(): if tag not in ignored: if tag.endswith('Resolution'): val = original[tag] self.assert_almost_equal(", "\"\"\" Checking that we're actually getting the data that we expect\"\"\" png =", "# convert tmp.tif -evaluate RightShift 4 12in16bit2.tif # imagemagick will auto scale so", "10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (255, 0, 0)) self.assertTrue(im.tag.next) im.seek(2) self.assertFalse(im.tag.next) self.assertEqual(im.size, (20, 20)) self.assertEqual(im.convert('RGB').getpixel((0,", "self.assertEqual( original[tag], value, \"%s 
didn't roundtrip\" % tag) for tag, value in original.items():", "\"Tests/images/hopper_g4_500.tif\" s = io.BytesIO() with open(file, 'rb') as f: s.write(f.read()) s.seek(0) im =", "im.convert('L') im = im.filter(ImageFilter.GaussianBlur(4)) im.save(out, compression='tiff_adobe_deflate') im2 = Image.open(out) im2.load() self.assert_image_equal(im, im2) def", "0)), (0, 128, 0)) self.assertTrue(im.tag.next) im.seek(1) self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (255, 0,", "tag) else: self.assertEqual( value, reloaded[tag], \"%s didn't roundtrip\" % tag) def test_g3_compression(self): i", "roundtrip\" % tag) for tag, value in original.items(): if tag not in ignored:", "compression='group4')) def test_fp_leak(self): im = Image.open(\"Tests/images/hopper_g4_500.tif\") fn = im.fp.fileno() os.fstat(fn) im.load() # this", "self.assertEqual(b[0], b'\\x01') self.assertEqual(b[1], b'\\xe0') out = self.tempfile(\"temp.tif\") im.save(out) reread = Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression'])", "it back out, in a different form. out = self.tempfile(\"temp.png\") im.save(out) class TestFileLibTiff(LibTiffTestCase):", "io from PIL import Image, TiffImagePlugin class LibTiffTestCase(PillowTestCase): def setUp(self): codecs = dir(Image.core)", "self.assertEqual(im.mode, 'I;16') b = im.tobytes() # Bytes are in image native order (little", "need to unshift so that the integer values are the same. 
im2 =", "0)) self.assertTrue(im.tag.next) im.seek(1) self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (255, 0, 0)) self.assertTrue(im.tag.next) im.seek(2)", "reread = Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480) # UNDONE - libtiff defaults", "self.assertEqual(reread.info['compression'], 'group4') self.assertEqual(reread.info['compression'], orig.info['compression']) self.assertNotEqual(orig.tobytes(), reread.tobytes()) def test_adobe_deflate_tiff(self): file = \"Tests/images/tiff_adobe_deflate.tif\" im =", "png = Image.open('Tests/images/hopper_bw_500.png') g4 = Image.open('Tests/images/hopper_g4_500.tif') self.assert_image_equal(g4, png) # see https://github.com/python-pillow/Pillow/issues/279 def test_g4_fillorder_eq_png(self):", "same. im2 = Image.open('Tests/images/12in16bit.tif') if Image.DEBUG: print (im.getpixel((0, 0))) print (im.getpixel((0, 1))) print", "Image.open(\"Tests/images/hopper_g4_500.tif\") fn = im.fp.fileno() os.fstat(fn) im.load() # this should close it. self.assertRaises(OSError, lambda:", "0, 278, 374), 0)) im.load() def test_write_metadata(self): \"\"\" Test metadata writing through libtiff", "here. 
def test_big_endian(self): im = Image.open('Tests/images/16bit.MM.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode, 'I;16B') b =", "tag) else: self.assertEqual( original[tag], value, \"%s didn't roundtrip\" % tag) for tag, value", "in original.items(): if tag not in ignored: if tag.endswith('Resolution'): val = reloaded[tag] self.assert_almost_equal(", "test_4bit(self): # Arrange test_file = \"Tests/images/hopper_gray_4bpp.tif\" original = hopper(\"L\") # Act TiffImagePlugin.READ_LIBTIFF =", "native order (little endian) if py3: self.assertEqual(b[0], ord(b'\\xe0')) self.assertEqual(b[1], ord(b'\\x01')) else: self.assertEqual(b[0], b'\\xe0')", "(0, 128, 0)) self.assertTrue(im.tag.next) im.seek(1) self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (255, 0, 0))", "(128, 128)) self.assertEqual(im.mode, \"L\") self.assert_image_similar(im, original, 7.3) def test_save_bytesio(self): # PR 1011 #", "sanity about the g4 tiff reading\"\"\" # 1 bit self.assertEqual(im.mode, \"1\") # Does", "class TestFileLibTiff(LibTiffTestCase): def test_g4_tiff(self): \"\"\"Test the ordinary file path load path\"\"\" file =", "= [ 'StripByteCounts', 'RowsPerStrip', 'PageNumber', 'PhotometricInterpretation'] for tag, value in reloaded.items(): if tag", "def test_g4_tiff_file(self): \"\"\"Testing the string load path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" with open(file, 'rb')", "g4 = Image.open('Tests/images/hopper_g4_500.tif') self.assert_image_equal(g4, png) # see https://github.com/python-pillow/Pillow/issues/279 def test_g4_fillorder_eq_png(self): \"\"\" Checking that", "pilim = hopper() def save_bytesio(compression=None): buffer_io = io.BytesIO() pilim.save(buffer_io, format=\"tiff\", compression=compression) buffer_io.seek(0) pilim_load", "mode = 'I;16B' here. 
def test_big_endian(self): im = Image.open('Tests/images/16bit.MM.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode,", "class LibTiffTestCase(PillowTestCase): def setUp(self): codecs = dir(Image.core) if \"libtiff_encoder\" not in codecs or", "if tag not in ignored: if tag.endswith('Resolution'): val = reloaded[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1],", "Image.open('Tests/images/hopper_g4.tif') f = self.tempfile('temp.tiff') img.save(f, tiffinfo=img.tag) loaded = Image.open(f) original = img.tag.named() reloaded", "Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480) # UNDONE - libtiff defaults to writing", "True # Image.DEBUG = True im = Image.open('Tests/images/12bit.cropped.tif') im.load() TiffImagePlugin.READ_LIBTIFF = False #", "same as what we wrote\"\"\" file = \"Tests/images/hopper_g4_500.tif\" orig = Image.open(file) out =", "we need to unshift so that the integer values are the same. im2", "False TiffImagePlugin.READ_LIBTIFF = False if __name__ == '__main__': unittest.main() # End of file", "Arrange test_file = \"Tests/images/hopper_gray_4bpp.tif\" original = hopper(\"L\") # Act TiffImagePlugin.READ_LIBTIFF = True im", "Image.open('Tests/images/hopper_g4_500.tif') out = self.tempfile(\"temp.tif\") i.save(out, compression='group3') reread = Image.open(out) self.assertEqual(reread.info['compression'], 'group3') self.assert_image_equal(reread, i)", "im.save(out) reread = Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480) def test_g4_string_info(self): \"\"\"Tests String", "force even more closed. 
self.assertRaises(OSError, lambda: os.fstat(fn)) self.assertRaises(OSError, lambda: os.close(fn)) def test_multipage(self): #", "Image.open('Tests/images/g4-fillorder-test.png') g4 = Image.open('Tests/images/g4-fillorder-test.tif') self.assert_image_equal(g4, png) def test_g4_write(self): \"\"\"Checking to see that the", "self.assert_image_equal(im, im2) def test_compressions(self): im = hopper('RGB') out = self.tempfile('temp.tif') for compression in", "0) # save_bytesio() save_bytesio('raw') save_bytesio(\"packbits\") save_bytesio(\"tiff_lzw\") TiffImagePlugin.WRITE_LIBTIFF = False TiffImagePlugin.READ_LIBTIFF = False if", "'I;16B') b = im.tobytes() # Bytes are in image native order (big endian)", "# Image.DEBUG = True im = Image.open('Tests/images/12bit.cropped.tif') im.load() TiffImagePlugin.READ_LIBTIFF = False # to", "= im.filter(ImageFilter.GaussianBlur(4)) im.save(out, compression='tiff_adobe_deflate') im2 = Image.open(out) im2.load() self.assert_image_equal(im, im2) def test_compressions(self): im", "im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480) # UNDONE - libtiff defaults to writing in native", "save_bytesio() save_bytesio('raw') save_bytesio(\"packbits\") save_bytesio(\"tiff_lzw\") TiffImagePlugin.WRITE_LIBTIFF = False TiffImagePlugin.READ_LIBTIFF = False if __name__ ==", "480) self.assertEqual(im.mode, 'I;16') b = im.tobytes() # Bytes are in image native order", "target -- # convert 12bit.cropped.tif -depth 16 tmp.tif # convert tmp.tif -evaluate RightShift", "value[0][0]/value[0][1], msg=\"%s didn't roundtrip\" % tag) else: self.assertEqual( original[tag], value, \"%s didn't roundtrip\"", "to capture that but not now\"\"\" im = hopper('RGB') out = self.tempfile('temp.tif') self.assertRaises(", "self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 0, 255)) TiffImagePlugin.READ_LIBTIFF = False def test__next(self): TiffImagePlugin.READ_LIBTIFF = True", "= \"Tests/images/hopper_gray_4bpp.tif\" original = hopper(\"L\") # Act 
TiffImagePlugin.READ_LIBTIFF = True im = Image.open(test_file)", "im2, 30) def test_cmyk_save(self): im = hopper('CMYK') out = self.tempfile('temp.tif') im.save(out, compression='tiff_adobe_deflate') im2", "if \"libtiff_encoder\" not in codecs or \"libtiff_decoder\" not in codecs: self.skipTest(\"tiff support not", "ignored: if tag.endswith('Resolution'): val = original[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't roundtrip\" %", "def test__next(self): TiffImagePlugin.READ_LIBTIFF = True im = Image.open('Tests/images/hopper.tif') self.assertFalse(im.tag.next) im.load() self.assertFalse(im.tag.next) def test_4bit(self):", "(10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 128, 0)) self.assertTrue(im.tag.next) im.seek(1) self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0,", "self.tempfile('temp.tif') for compression in ('packbits', 'tiff_lzw'): im.save(out, compression=compression) im2 = Image.open(out) self.assert_image_equal(im, im2)", "output on stderr from the error thrown by libtiff. 
We need to capture", "self.assertEqual( value, reloaded[tag], \"%s didn't roundtrip\" % tag) def test_g3_compression(self): i = Image.open('Tests/images/hopper_g4_500.tif')", "= loaded.tag.named() # PhotometricInterpretation is set from SAVE_INFO, # not the original image.", "def test_g4_fillorder_eq_png(self): \"\"\" Checking that we're actually getting the data that we expect\"\"\"", "im.seek(0) self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 128, 0)) self.assertTrue(im.tag.next) im.seek(1) self.assertEqual(im.size, (10,", "data that we expect\"\"\" png = Image.open('Tests/images/hopper_bw_500.png') g4 = Image.open('Tests/images/hopper_g4_500.tif') self.assert_image_equal(g4, png) #", "12bit.cropped.tif -depth 16 tmp.tif # convert tmp.tif -evaluate RightShift 4 12in16bit2.tif # imagemagick", "self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_eq_png(self): \"\"\" Checking that we're actually getting the", "500)) self._assert_noerr(im) def test_g4_large(self): file = \"Tests/images/pport_g4.tif\" im = Image.open(file) self._assert_noerr(im) def test_g4_tiff_file(self):", "= io.BytesIO() with open(file, 'rb') as f: s.write(f.read()) s.seek(0) im = Image.open(s) self.assertEqual(im.size,", "= Image.open(\"Tests/images/hopper_g4_500.tif\") fn = im.fp.fileno() os.fstat(fn) im.load() # this should close it. 
self.assertRaises(OSError,", "1 bit self.assertEqual(im.mode, \"1\") # Does the data actually load im.load() im.getdata() try:", "orig.tag[269] = 'temp.tif' orig.save(out) reread = Image.open(out) self.assertEqual('temp.tif', reread.tag[269]) def test_12bit_rawmode(self): \"\"\" Are", "im2 = Image.open(out) im2.load() self.assert_image_equal(im, im2) def test_compressions(self): im = hopper('RGB') out =", "(im.getpixel((0, 1))) print (im.getpixel((0, 2))) print (im2.getpixel((0, 0))) print (im2.getpixel((0, 1))) print (im2.getpixel((0,", "def save_bytesio(compression=None): buffer_io = io.BytesIO() pilim.save(buffer_io, format=\"tiff\", compression=compression) buffer_io.seek(0) pilim_load = Image.open(buffer_io) self.assert_image_similar(pilim,", "so we need to unshift so that the integer values are the same.", "os.fstat(fn) im.load() # this should close it. self.assertRaises(OSError, lambda: os.fstat(fn)) im = None", "s = io.BytesIO() with open(file, 'rb') as f: s.write(f.read()) s.seek(0) im = Image.open(s)", "we'll get back mode = 'I;16B' here. 
def test_big_endian(self): im = Image.open('Tests/images/16bit.MM.deflate.tif') self.assertEqual(im.getpixel((0,", "# so we need to unshift so that the integer values are the", "TiffImagePlugin.READ_LIBTIFF = True im = Image.open('Tests/images/multipage.tiff') # file is a multipage tiff, 10x10", "im = Image.open(file) self._assert_noerr(im) def test_g4_tiff_file(self): \"\"\"Testing the string load path\"\"\" file =", "im = Image.open(\"Tests/images/hopper_g4_500.tif\") fn = im.fp.fileno() os.fstat(fn) im.load() # this should close it.", "= io.BytesIO() pilim.save(buffer_io, format=\"tiff\", compression=compression) buffer_io.seek(0) pilim_load = Image.open(buffer_io) self.assert_image_similar(pilim, pilim_load, 0) #", "0))) print (im.getpixel((0, 1))) print (im.getpixel((0, 2))) print (im2.getpixel((0, 0))) print (im2.getpixel((0, 1)))", "0)), (0, 0, 255)) TiffImagePlugin.READ_LIBTIFF = False def test__next(self): TiffImagePlugin.READ_LIBTIFF = True im", "0))) print (im2.getpixel((0, 1))) print (im2.getpixel((0, 2))) self.assert_image_equal(im, im2) def test_blur(self): # test", "out = \"temp.le.tif\" im.save(out) reread = Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480) #", "to output on stderr from the error thrown by libtiff. 
We need to", "compression='group3')) self.assertRaises(IOError, lambda: im.save(out, compression='group4')) def test_fp_leak(self): im = Image.open(\"Tests/images/hopper_g4_500.tif\") fn = im.fp.fileno()", "i.save(out, compression='group3') reread = Image.open(out) self.assertEqual(reread.info['compression'], 'group3') self.assert_image_equal(reread, i) def test_little_endian(self): im =", "reread.tag[269]) def test_12bit_rawmode(self): \"\"\" Are we generating the same interpretation of the image", "(0, 0, 255)) TiffImagePlugin.READ_LIBTIFF = False def test__next(self): TiffImagePlugin.READ_LIBTIFF = True im =", "assert basic sanity about the g4 tiff reading\"\"\" # 1 bit self.assertEqual(im.mode, \"1\")", "original[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't roundtrip\" % tag) else: self.assertEqual( original[tag], value,", "'I;16B' here. def test_big_endian(self): im = Image.open('Tests/images/16bit.MM.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode, 'I;16B') b", "tag.endswith('Resolution'): val = original[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't roundtrip\" % tag) else:", "self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480) # UNDONE - libtiff defaults to writing in", "pilim_load = Image.open(buffer_io) self.assert_image_similar(pilim, pilim_load, 0) # save_bytesio() save_bytesio('raw') save_bytesio(\"packbits\") save_bytesio(\"tiff_lzw\") TiffImagePlugin.WRITE_LIBTIFF =", "order (little endian) if py3: self.assertEqual(b[0], ord(b'\\xe0')) self.assertEqual(b[1], ord(b'\\x01')) else: self.assertEqual(b[0], b'\\xe0') self.assertEqual(b[1],", "pilim_load, 0) # save_bytesio() save_bytesio('raw') save_bytesio(\"packbits\") save_bytesio(\"tiff_lzw\") TiffImagePlugin.WRITE_LIBTIFF = False TiffImagePlugin.READ_LIBTIFF = False", "the saved image is the same as what we 
wrote\"\"\" file = \"Tests/images/hopper_g4_500.tif\"", "= Image.open(buffer_io) self.assert_image_similar(pilim, pilim_load, 0) # save_bytesio() save_bytesio('raw') save_bytesio(\"packbits\") save_bytesio(\"tiff_lzw\") TiffImagePlugin.WRITE_LIBTIFF = False", "compression='group3') reread = Image.open(out) self.assertEqual(reread.info['compression'], 'group3') self.assert_image_equal(reread, i) def test_little_endian(self): im = Image.open('Tests/images/16bit.deflate.tif')", "except: print(\"No _compression\") print (dir(im)) # can we write it back out, in", "basic sanity about the g4 tiff reading\"\"\" # 1 bit self.assertEqual(im.mode, \"1\") #", "import unittest, PillowTestCase, hopper, py3 import os import io from PIL import Image,", "f: im = Image.open(f) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_tiff_bytesio(self): \"\"\"Testing the stringio", "self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode, 'I;16B') b = im.tobytes() # Bytes are in image", "convert 12bit.cropped.tif -depth 16 tmp.tif # convert tmp.tif -evaluate RightShift 4 12in16bit2.tif #", "a different form. out = self.tempfile(\"temp.png\") im.save(out) class TestFileLibTiff(LibTiffTestCase): def test_g4_tiff(self): \"\"\"Test the", "that but not now\"\"\" im = hopper('RGB') out = self.tempfile('temp.tif') self.assertRaises( IOError, lambda:", "tif. 
from PIL import ImageFilter out = self.tempfile('temp.tif') im = Image.open('Tests/images/pport_g4.tif') im =", "rot = orig.transpose(Image.ROTATE_90) self.assertEqual(rot.size, (500, 500)) rot.save(out) reread = Image.open(out) self.assertEqual(reread.size, (500, 500))", "from helper import unittest, PillowTestCase, hopper, py3 import os import io from PIL", "\"1\") # Does the data actually load im.load() im.getdata() try: self.assertEqual(im._compression, 'group4') except:", "Image.open(f) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_tiff_bytesio(self): \"\"\"Testing the stringio loading code path\"\"\"", "s.seek(0) im = Image.open(s) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_eq_png(self): \"\"\" Checking that", "im2 = Image.open(out) self.assert_image_similar(im, im2, 30) def test_cmyk_save(self): im = hopper('CMYK') out =", "# on big endian, we'll get back mode = 'I;16B' here. def test_big_endian(self):", "= False def test__next(self): TiffImagePlugin.READ_LIBTIFF = True im = Image.open('Tests/images/hopper.tif') self.assertFalse(im.tag.next) im.load() self.assertFalse(im.tag.next)", "in ignored: if tag.endswith('Resolution'): val = original[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't roundtrip\"", "= \"Tests/images/pport_g4.tif\" im = Image.open(file) self._assert_noerr(im) def test_g4_tiff_file(self): \"\"\"Testing the string load path\"\"\"", "but when running all tests causes a failure due to output on stderr", "expect\"\"\" png = Image.open('Tests/images/g4-fillorder-test.png') g4 = Image.open('Tests/images/g4-fillorder-test.tif') self.assert_image_equal(g4, png) def test_g4_write(self): \"\"\"Checking to", "test_fp_leak(self): im = Image.open(\"Tests/images/hopper_g4_500.tif\") fn = im.fp.fileno() os.fstat(fn) im.load() # this should close", "is set from SAVE_INFO, # not the original image. 
ignored = [ 'StripByteCounts',", "self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (255, 0, 0)) self.assertTrue(im.tag.next) im.seek(2) self.assertFalse(im.tag.next) self.assertEqual(im.size, (20,", "_assert_noerr(self, im): \"\"\"Helper tests that assert basic sanity about the g4 tiff reading\"\"\"", "def test_write_metadata(self): \"\"\" Test metadata writing through libtiff \"\"\" img = Image.open('Tests/images/hopper_g4.tif') f", "im2) im.save(out, compression='jpeg') im2 = Image.open(out) self.assert_image_similar(im, im2, 30) def test_cmyk_save(self): im =", "im2) def test_compressions(self): im = hopper('RGB') out = self.tempfile('temp.tif') for compression in ('packbits',", "as Imagemagick is? \"\"\" TiffImagePlugin.READ_LIBTIFF = True # Image.DEBUG = True im =", "im = hopper('RGB') out = self.tempfile('temp.tif') for compression in ('packbits', 'tiff_lzw'): im.save(out, compression=compression)", "('packbits', 'tiff_lzw'): im.save(out, compression=compression) im2 = Image.open(out) self.assert_image_equal(im, im2) im.save(out, compression='jpeg') im2 =", "# 1 bit self.assertEqual(im.mode, \"1\") # Does the data actually load im.load() im.getdata()", "im2 = Image.open(out) self.assert_image_equal(im, im2) def xtest_bw_compression_w_rgb(self): \"\"\" This test passes, but when", "(10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (255, 0, 0)) self.assertTrue(im.tag.next) im.seek(2) self.assertFalse(im.tag.next) self.assertEqual(im.size, (20, 20))", "im.load() # this should close it. self.assertRaises(OSError, lambda: os.fstat(fn)) im = None #", "# not the original image. 
ignored = [ 'StripByteCounts', 'RowsPerStrip', 'PageNumber', 'PhotometricInterpretation'] for", "128)) self.assertEqual(im.mode, \"L\") self.assert_image_similar(im, original, 7.3) def test_save_bytesio(self): # PR 1011 # Test", "def test_adobe_deflate_tiff(self): file = \"Tests/images/tiff_adobe_deflate.tif\" im = Image.open(file) self.assertEqual(im.mode, \"RGB\") self.assertEqual(im.size, (278, 374))", "Image.open(out) self.assert_image_similar(im, im2, 30) def test_cmyk_save(self): im = hopper('CMYK') out = self.tempfile('temp.tif') im.save(out,", "TiffImagePlugin.READ_LIBTIFF = True im = Image.open('Tests/images/hopper.tif') self.assertFalse(im.tag.next) im.load() self.assertFalse(im.tag.next) def test_4bit(self): # Arrange", "Image.open('Tests/images/pport_g4.tif') im = im.convert('L') im = im.filter(ImageFilter.GaussianBlur(4)) im.save(out, compression='tiff_adobe_deflate') im2 = Image.open(out) im2.load()", "self.assertEqual(im.mode, \"RGB\") self.assertEqual(im.size, (278, 374)) self.assertEqual( im.tile[0][:3], ('tiff_adobe_deflate', (0, 0, 278, 374), 0))", "test_g4_large(self): file = \"Tests/images/pport_g4.tif\" im = Image.open(file) self._assert_noerr(im) def test_g4_tiff_file(self): \"\"\"Testing the string", "image. 
ignored = [ 'StripByteCounts', 'RowsPerStrip', 'PageNumber', 'PhotometricInterpretation'] for tag, value in reloaded.items():", "= \"Tests/images/hopper_g4_500.tif\" s = io.BytesIO() with open(file, 'rb') as f: s.write(f.read()) s.seek(0) im", "= True im = Image.open('Tests/images/hopper.tif') self.assertFalse(im.tag.next) im.load() self.assertFalse(im.tag.next) def test_4bit(self): # Arrange test_file", "compression='jpeg') im2 = Image.open(out) self.assert_image_similar(im, im2, 30) def test_cmyk_save(self): im = hopper('CMYK') out", "tag not in ignored: if tag.endswith('Resolution'): val = reloaded[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s", "_compression\") print (dir(im)) # can we write it back out, in a different", "blue im.seek(0) self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 128, 0)) self.assertTrue(im.tag.next) im.seek(1) self.assertEqual(im.size,", "im = im.filter(ImageFilter.GaussianBlur(4)) im.save(out, compression='tiff_adobe_deflate') im2 = Image.open(out) im2.load() self.assert_image_equal(im, im2) def test_compressions(self):", "reread = Image.open(out) self.assertEqual(reread.info['compression'], 'group3') self.assert_image_equal(reread, i) def test_little_endian(self): im = Image.open('Tests/images/16bit.deflate.tif') self.assertEqual(im.getpixel((0,", "= None # this should force even more closed. 
self.assertRaises(OSError, lambda: os.fstat(fn)) self.assertRaises(OSError,", "import ImageFilter out = self.tempfile('temp.tif') im = Image.open('Tests/images/pport_g4.tif') im = im.convert('L') im =", "im.seek(1) self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (255, 0, 0)) self.assertTrue(im.tag.next) im.seek(2) self.assertFalse(im.tag.next) self.assertEqual(im.size,", "\"\"\"Testing the stringio loading code path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" s = io.BytesIO() with", "writing in native endian, so # on big endian, we'll get back mode", "Test metadata writing through libtiff \"\"\" img = Image.open('Tests/images/hopper_g4.tif') f = self.tempfile('temp.tiff') img.save(f,", "out = self.tempfile('temp.tif') im = Image.open('Tests/images/pport_g4.tif') im = im.convert('L') im = im.filter(ImageFilter.GaussianBlur(4)) im.save(out,", "= im.tobytes() # Bytes are in image native order (little endian) if py3:", "def test_big_endian(self): im = Image.open('Tests/images/16bit.MM.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode, 'I;16B') b = im.tobytes()", "'group3') self.assert_image_equal(reread, i) def test_little_endian(self): im = Image.open('Tests/images/16bit.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode, 'I;16')", "original, 7.3) def test_save_bytesio(self): # PR 1011 # Test TIFF saving to io.BytesIO()", "Image.open(out) self.assertEqual('temp.tif', reread.tag[269]) def test_12bit_rawmode(self): \"\"\" Are we generating the same interpretation of", "of the image as Imagemagick is? 
\"\"\" TiffImagePlugin.READ_LIBTIFF = True # Image.DEBUG =", "self.assert_image_similar(im, im2, 30) def test_cmyk_save(self): im = hopper('CMYK') out = self.tempfile('temp.tif') im.save(out, compression='tiff_adobe_deflate')", "We need to capture that but not now\"\"\" im = hopper('RGB') out =", "test_compressions(self): im = hopper('RGB') out = self.tempfile('temp.tif') for compression in ('packbits', 'tiff_lzw'): im.save(out,", "png = Image.open('Tests/images/g4-fillorder-test.png') g4 = Image.open('Tests/images/g4-fillorder-test.tif') self.assert_image_equal(g4, png) def test_g4_write(self): \"\"\"Checking to see", "should force even more closed. self.assertRaises(OSError, lambda: os.fstat(fn)) self.assertRaises(OSError, lambda: os.close(fn)) def test_multipage(self):", "i) def test_little_endian(self): im = Image.open('Tests/images/16bit.deflate.tif') self.assertEqual(im.getpixel((0, 0)), 480) self.assertEqual(im.mode, 'I;16') b =", "Image.DEBUG: print (im.getpixel((0, 0))) print (im.getpixel((0, 1))) print (im.getpixel((0, 2))) print (im2.getpixel((0, 0)))", "file = \"Tests/images/tiff_adobe_deflate.tif\" im = Image.open(file) self.assertEqual(im.mode, \"RGB\") self.assertEqual(im.size, (278, 374)) self.assertEqual( im.tile[0][:3],", "didn't roundtrip\" % tag) else: self.assertEqual( original[tag], value, \"%s didn't roundtrip\" % tag)", "endian) if py3: self.assertEqual(b[0], ord(b'\\x01')) self.assertEqual(b[1], ord(b'\\xe0')) else: self.assertEqual(b[0], b'\\x01') self.assertEqual(b[1], b'\\xe0') out", "'StripByteCounts', 'RowsPerStrip', 'PageNumber', 'PhotometricInterpretation'] for tag, value in reloaded.items(): if tag not in", "test_g4_fillorder_eq_png(self): \"\"\" Checking that we're actually getting the data that we expect\"\"\" png", "path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" s = io.BytesIO() with open(file, 'rb') as f: s.write(f.read())", "im.save(out, compression='tiff_adobe_deflate') im2 = Image.open(out) self.assert_image_equal(im, 
im2) def xtest_bw_compression_w_rgb(self): \"\"\" This test passes,", "im.fp.fileno() os.fstat(fn) im.load() # this should close it. self.assertRaises(OSError, lambda: os.fstat(fn)) im =", "True im = Image.open('Tests/images/12bit.cropped.tif') im.load() TiffImagePlugin.READ_LIBTIFF = False # to make the target", "= Image.open(out) self.assertEqual(reread.size, (500, 500)) self._assert_noerr(reread) self.assert_image_equal(reread, rot) self.assertEqual(reread.info['compression'], 'group4') self.assertEqual(reread.info['compression'], orig.info['compression']) self.assertNotEqual(orig.tobytes(),", "im = im.convert('L') im = im.filter(ImageFilter.GaussianBlur(4)) im.save(out, compression='tiff_adobe_deflate') im2 = Image.open(out) im2.load() self.assert_image_equal(im,", "im = Image.open('Tests/images/multipage.tiff') # file is a multipage tiff, 10x10 green, 10x10 red,", "480) def test_g4_string_info(self): \"\"\"Tests String data in info directory\"\"\" file = \"Tests/images/hopper_g4_500.tif\" orig", "= True im = Image.open(test_file) TiffImagePlugin.READ_LIBTIFF = False # Assert self.assertEqual(im.size, (128, 128))", "im = hopper('CMYK') out = self.tempfile('temp.tif') im.save(out, compression='tiff_adobe_deflate') im2 = Image.open(out) self.assert_image_equal(im, im2)", "unittest, PillowTestCase, hopper, py3 import os import io from PIL import Image, TiffImagePlugin", "= \"Tests/images/hopper_g4_500.tif\" orig = Image.open(file) out = self.tempfile(\"temp.tif\") orig.tag[269] = 'temp.tif' orig.save(out) reread", "save to compressed tif. 
from PIL import ImageFilter out = self.tempfile('temp.tif') im =", "10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 128, 0)) self.assertTrue(im.tag.next) im.seek(1) self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)),", "print (im2.getpixel((0, 1))) print (im2.getpixel((0, 2))) self.assert_image_equal(im, im2) def test_blur(self): # test case", "set from SAVE_INFO, # not the original image. ignored = [ 'StripByteCounts', 'RowsPerStrip',", "to writing in native endian, so # on big endian, we'll get back", "\"Tests/images/hopper_g4_500.tif\" with open(file, 'rb') as f: im = Image.open(f) self.assertEqual(im.size, (500, 500)) self._assert_noerr(im)", "for compression in ('packbits', 'tiff_lzw'): im.save(out, compression=compression) im2 = Image.open(out) self.assert_image_equal(im, im2) im.save(out,", "self.assertEqual(im.size, (500, 500)) self._assert_noerr(im) def test_g4_tiff_bytesio(self): \"\"\"Testing the stringio loading code path\"\"\" file", "libtiff defaults to writing in native endian, so # on big endian, we'll", "PillowTestCase, hopper, py3 import os import io from PIL import Image, TiffImagePlugin class", "PIL import ImageFilter out = self.tempfile('temp.tif') im = Image.open('Tests/images/pport_g4.tif') im = im.convert('L') im", "test_write_metadata(self): \"\"\" Test metadata writing through libtiff \"\"\" img = Image.open('Tests/images/hopper_g4.tif') f =", "b'\\x01') out = self.tempfile(\"temp.tif\") # out = \"temp.le.tif\" im.save(out) reread = Image.open(out) self.assertEqual(reread.info['compression'],", "im2 = Image.open(out) self.assert_image_equal(im, im2) im.save(out, compression='jpeg') im2 = Image.open(out) self.assert_image_similar(im, im2, 30)", "if tag.endswith('Resolution'): val = original[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't roundtrip\" % tag)", "not available\") def _assert_noerr(self, im): \"\"\"Helper tests that assert basic 
sanity about the", "\"\"\" img = Image.open('Tests/images/hopper_g4.tif') f = self.tempfile('temp.tiff') img.save(f, tiffinfo=img.tag) loaded = Image.open(f) original", "so that a 12bit FFF is 16bit FFF0, # so we need to", "compressed tif. from PIL import ImageFilter out = self.tempfile('temp.tif') im = Image.open('Tests/images/pport_g4.tif') im", "file = \"Tests/images/hopper_g4_500.tif\" s = io.BytesIO() with open(file, 'rb') as f: s.write(f.read()) s.seek(0)", "= Image.open(file) out = self.tempfile(\"temp.tif\") orig.tag[269] = 'temp.tif' orig.save(out) reread = Image.open(out) self.assertEqual('temp.tif',", "# convert 12bit.cropped.tif -depth 16 tmp.tif # convert tmp.tif -evaluate RightShift 4 12in16bit2.tif", "20x20 blue im.seek(0) self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 128, 0)) self.assertTrue(im.tag.next) im.seek(1)", "test_g4_tiff_bytesio(self): \"\"\"Testing the stringio loading code path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" s = io.BytesIO()", "out = self.tempfile(\"temp.tif\") i.save(out, compression='group3') reread = Image.open(out) self.assertEqual(reread.info['compression'], 'group3') self.assert_image_equal(reread, i) def", "im = Image.open(file) self.assertEqual(im.mode, \"RGB\") self.assertEqual(im.size, (278, 374)) self.assertEqual( im.tile[0][:3], ('tiff_adobe_deflate', (0, 0,", "the target -- # convert 12bit.cropped.tif -depth 16 tmp.tif # convert tmp.tif -evaluate", "'RowsPerStrip', 'PageNumber', 'PhotometricInterpretation'] for tag, value in reloaded.items(): if tag not in ignored:", "import io from PIL import Image, TiffImagePlugin class LibTiffTestCase(PillowTestCase): def setUp(self): codecs =", "test_cmyk_save(self): im = hopper('CMYK') out = self.tempfile('temp.tif') im.save(out, compression='tiff_adobe_deflate') im2 = Image.open(out) self.assert_image_equal(im,", "'I;16') b = im.tobytes() # Bytes are in image native order (little endian)", "12in16bit2.tif # imagemagick 
will auto scale so that a 12bit FFF is 16bit", "val = reloaded[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't roundtrip\" % tag) else: self.assertEqual(", "= \"Tests/images/hopper_g4_500.tif\" with open(file, 'rb') as f: im = Image.open(f) self.assertEqual(im.size, (500, 500))", "self.assertEqual(b[1], b'\\xe0') out = self.tempfile(\"temp.tif\") im.save(out) reread = Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)),", "= Image.open('Tests/images/hopper_g4_500.tif') self.assert_image_equal(g4, png) # see https://github.com/python-pillow/Pillow/issues/279 def test_g4_fillorder_eq_png(self): \"\"\" Checking that we're", "def test_g4_write(self): \"\"\"Checking to see that the saved image is the same as", "test_g4_tiff_file(self): \"\"\"Testing the string load path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" with open(file, 'rb') as", "that a 12bit FFF is 16bit FFF0, # so we need to unshift", "self.assertEqual(im._compression, 'group4') except: print(\"No _compression\") print (dir(im)) # can we write it back", "file path load path\"\"\" file = \"Tests/images/hopper_g4_500.tif\" im = Image.open(file) self.assertEqual(im.size, (500, 500))", "Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression']) self.assertEqual(reread.getpixel((0, 0)), 480) def test_g4_string_info(self): \"\"\"Tests String data in info", "im.save(out, compression='group3')) self.assertRaises(IOError, lambda: im.save(out, compression='group4')) def test_fp_leak(self): im = Image.open(\"Tests/images/hopper_g4_500.tif\") fn =", "self.assertEqual(im.convert('RGB').getpixel((0, 0)), (0, 128, 0)) self.assertTrue(im.tag.next) im.seek(1) self.assertEqual(im.size, (10, 10)) self.assertEqual(im.convert('RGB').getpixel((0, 0)), (255,", "val = original[tag] self.assert_almost_equal( val[0][0]/val[0][1], value[0][0]/value[0][1], msg=\"%s didn't roundtrip\" % 
tag) else: self.assertEqual(", "PIL import Image, TiffImagePlugin class LibTiffTestCase(PillowTestCase): def setUp(self): codecs = dir(Image.core) if \"libtiff_encoder\"", "test_g4_write(self): \"\"\"Checking to see that the saved image is the same as what", "out = self.tempfile(\"temp.tif\") # out = \"temp.le.tif\" im.save(out) reread = Image.open(out) self.assertEqual(reread.info['compression'], im.info['compression'])", "self.assertEqual(b[0], b'\\xe0') self.assertEqual(b[1], b'\\x01') out = self.tempfile(\"temp.tif\") # out = \"temp.le.tif\" im.save(out) reread", "16bit FFF0, # so we need to unshift so that the integer values" ]
[ "'val1', 'key2': 'val2'} test_master_url = \"TEST1\" test_app_name = \"TEST_APP\" mock_spark_session_builder = Mock() #", "unittest from unittest.mock import Mock from app_processor.pyspark_hb_app_processor import parse_configs, get_or_generate_spark_session class TestAppProcessor(unittest.TestCase): def", "self.assertEqual(2, len(conf)) def test_get_or_generate_spark_session(self): test_map = {'key1': 'val1', 'key2': 'val2'} test_master_url = \"TEST1\"", "this since we use the builder pattern when creating the SparkSession attrs =", "since we use the builder pattern when creating the SparkSession attrs = {'master.return_value':", "pattern when creating the SparkSession attrs = {'master.return_value': mock_spark_session_builder, 'appName.return_value': mock_spark_session_builder} mock_spark_session_builder.configure_mock(**attrs) get_or_generate_spark_session(mock_spark_session_builder,", "'val2'} test_master_url = \"TEST1\" test_app_name = \"TEST_APP\" mock_spark_session_builder = Mock() # we have", "get_or_generate_spark_session(mock_spark_session_builder, test_map, test_master_url, test_app_name) mock_spark_session_builder.master.assert_called_with(test_master_url) mock_spark_session_builder.appName.assert_called_with(test_app_name) self.assertEqual(len(test_map), mock_spark_session_builder.config.call_count) mock_spark_session_builder.getOrCreate.assert_called_once() if __name__ == '__main__':", "SparkSession attrs = {'master.return_value': mock_spark_session_builder, 'appName.return_value': mock_spark_session_builder} mock_spark_session_builder.configure_mock(**attrs) get_or_generate_spark_session(mock_spark_session_builder, test_map, test_master_url, test_app_name) mock_spark_session_builder.master.assert_called_with(test_master_url)", "builder pattern when creating the SparkSession attrs = {'master.return_value': mock_spark_session_builder, 'appName.return_value': mock_spark_session_builder} mock_spark_session_builder.configure_mock(**attrs)", 
"test_get_or_generate_spark_session(self): test_map = {'key1': 'val1', 'key2': 'val2'} test_master_url = \"TEST1\" test_app_name = \"TEST_APP\"", "{'key1': 'val1', 'key2': 'val2'} test_master_url = \"TEST1\" test_app_name = \"TEST_APP\" mock_spark_session_builder = Mock()", "mock_spark_session_builder} mock_spark_session_builder.configure_mock(**attrs) get_or_generate_spark_session(mock_spark_session_builder, test_map, test_master_url, test_app_name) mock_spark_session_builder.master.assert_called_with(test_master_url) mock_spark_session_builder.appName.assert_called_with(test_app_name) self.assertEqual(len(test_map), mock_spark_session_builder.config.call_count) mock_spark_session_builder.getOrCreate.assert_called_once() if __name__", "test_parse_configs(self): conf = parse_configs(\"test-conf.yml\") self.assertEqual('value1', conf['key1']) self.assertEqual('value2', conf['key2']) self.assertEqual(2, len(conf)) def test_get_or_generate_spark_session(self): test_map", "<filename>tests/test_pyspark_hb_app_processor.py<gh_stars>0 import unittest from unittest.mock import Mock from app_processor.pyspark_hb_app_processor import parse_configs, get_or_generate_spark_session class", "conf['key1']) self.assertEqual('value2', conf['key2']) self.assertEqual(2, len(conf)) def test_get_or_generate_spark_session(self): test_map = {'key1': 'val1', 'key2': 'val2'}", "we have to do this since we use the builder pattern when creating", "class TestAppProcessor(unittest.TestCase): def test_parse_configs(self): conf = parse_configs(\"test-conf.yml\") self.assertEqual('value1', conf['key1']) self.assertEqual('value2', conf['key2']) self.assertEqual(2, len(conf))", "have to do this since we use the builder pattern when creating the", "to do this since we use the builder pattern when creating the SparkSession", "the SparkSession attrs = {'master.return_value': mock_spark_session_builder, 'appName.return_value': mock_spark_session_builder} 
mock_spark_session_builder.configure_mock(**attrs) get_or_generate_spark_session(mock_spark_session_builder, test_map, test_master_url, test_app_name)", "= parse_configs(\"test-conf.yml\") self.assertEqual('value1', conf['key1']) self.assertEqual('value2', conf['key2']) self.assertEqual(2, len(conf)) def test_get_or_generate_spark_session(self): test_map = {'key1':", "= Mock() # we have to do this since we use the builder", "'key2': 'val2'} test_master_url = \"TEST1\" test_app_name = \"TEST_APP\" mock_spark_session_builder = Mock() # we", "mock_spark_session_builder, 'appName.return_value': mock_spark_session_builder} mock_spark_session_builder.configure_mock(**attrs) get_or_generate_spark_session(mock_spark_session_builder, test_map, test_master_url, test_app_name) mock_spark_session_builder.master.assert_called_with(test_master_url) mock_spark_session_builder.appName.assert_called_with(test_app_name) self.assertEqual(len(test_map), mock_spark_session_builder.config.call_count) mock_spark_session_builder.getOrCreate.assert_called_once()", "use the builder pattern when creating the SparkSession attrs = {'master.return_value': mock_spark_session_builder, 'appName.return_value':", "do this since we use the builder pattern when creating the SparkSession attrs", "import parse_configs, get_or_generate_spark_session class TestAppProcessor(unittest.TestCase): def test_parse_configs(self): conf = parse_configs(\"test-conf.yml\") self.assertEqual('value1', conf['key1']) self.assertEqual('value2',", "we use the builder pattern when creating the SparkSession attrs = {'master.return_value': mock_spark_session_builder,", "def test_parse_configs(self): conf = parse_configs(\"test-conf.yml\") self.assertEqual('value1', conf['key1']) self.assertEqual('value2', conf['key2']) self.assertEqual(2, len(conf)) def test_get_or_generate_spark_session(self):", "# we have to do this since we use the builder pattern when", "unittest.mock import Mock from 
app_processor.pyspark_hb_app_processor import parse_configs, get_or_generate_spark_session class TestAppProcessor(unittest.TestCase): def test_parse_configs(self): conf", "conf = parse_configs(\"test-conf.yml\") self.assertEqual('value1', conf['key1']) self.assertEqual('value2', conf['key2']) self.assertEqual(2, len(conf)) def test_get_or_generate_spark_session(self): test_map =", "app_processor.pyspark_hb_app_processor import parse_configs, get_or_generate_spark_session class TestAppProcessor(unittest.TestCase): def test_parse_configs(self): conf = parse_configs(\"test-conf.yml\") self.assertEqual('value1', conf['key1'])", "= \"TEST1\" test_app_name = \"TEST_APP\" mock_spark_session_builder = Mock() # we have to do", "\"TEST1\" test_app_name = \"TEST_APP\" mock_spark_session_builder = Mock() # we have to do this", "def test_get_or_generate_spark_session(self): test_map = {'key1': 'val1', 'key2': 'val2'} test_master_url = \"TEST1\" test_app_name =", "import unittest from unittest.mock import Mock from app_processor.pyspark_hb_app_processor import parse_configs, get_or_generate_spark_session class TestAppProcessor(unittest.TestCase):", "conf['key2']) self.assertEqual(2, len(conf)) def test_get_or_generate_spark_session(self): test_map = {'key1': 'val1', 'key2': 'val2'} test_master_url =", "= \"TEST_APP\" mock_spark_session_builder = Mock() # we have to do this since we", "when creating the SparkSession attrs = {'master.return_value': mock_spark_session_builder, 'appName.return_value': mock_spark_session_builder} mock_spark_session_builder.configure_mock(**attrs) get_or_generate_spark_session(mock_spark_session_builder, test_map,", "{'master.return_value': mock_spark_session_builder, 'appName.return_value': mock_spark_session_builder} mock_spark_session_builder.configure_mock(**attrs) get_or_generate_spark_session(mock_spark_session_builder, test_map, test_master_url, test_app_name) mock_spark_session_builder.master.assert_called_with(test_master_url) 
mock_spark_session_builder.appName.assert_called_with(test_app_name) self.assertEqual(len(test_map), mock_spark_session_builder.config.call_count)", "self.assertEqual('value1', conf['key1']) self.assertEqual('value2', conf['key2']) self.assertEqual(2, len(conf)) def test_get_or_generate_spark_session(self): test_map = {'key1': 'val1', 'key2':", "test_map = {'key1': 'val1', 'key2': 'val2'} test_master_url = \"TEST1\" test_app_name = \"TEST_APP\" mock_spark_session_builder", "from app_processor.pyspark_hb_app_processor import parse_configs, get_or_generate_spark_session class TestAppProcessor(unittest.TestCase): def test_parse_configs(self): conf = parse_configs(\"test-conf.yml\") self.assertEqual('value1',", "TestAppProcessor(unittest.TestCase): def test_parse_configs(self): conf = parse_configs(\"test-conf.yml\") self.assertEqual('value1', conf['key1']) self.assertEqual('value2', conf['key2']) self.assertEqual(2, len(conf)) def", "len(conf)) def test_get_or_generate_spark_session(self): test_map = {'key1': 'val1', 'key2': 'val2'} test_master_url = \"TEST1\" test_app_name", "creating the SparkSession attrs = {'master.return_value': mock_spark_session_builder, 'appName.return_value': mock_spark_session_builder} mock_spark_session_builder.configure_mock(**attrs) get_or_generate_spark_session(mock_spark_session_builder, test_map, test_master_url,", "Mock from app_processor.pyspark_hb_app_processor import parse_configs, get_or_generate_spark_session class TestAppProcessor(unittest.TestCase): def test_parse_configs(self): conf = parse_configs(\"test-conf.yml\")", "get_or_generate_spark_session class TestAppProcessor(unittest.TestCase): def test_parse_configs(self): conf = parse_configs(\"test-conf.yml\") self.assertEqual('value1', conf['key1']) self.assertEqual('value2', conf['key2']) self.assertEqual(2,", "mock_spark_session_builder.configure_mock(**attrs) get_or_generate_spark_session(mock_spark_session_builder, test_map, test_master_url, test_app_name) 
mock_spark_session_builder.master.assert_called_with(test_master_url) mock_spark_session_builder.appName.assert_called_with(test_app_name) self.assertEqual(len(test_map), mock_spark_session_builder.config.call_count) mock_spark_session_builder.getOrCreate.assert_called_once() if __name__ ==", "Mock() # we have to do this since we use the builder pattern", "parse_configs(\"test-conf.yml\") self.assertEqual('value1', conf['key1']) self.assertEqual('value2', conf['key2']) self.assertEqual(2, len(conf)) def test_get_or_generate_spark_session(self): test_map = {'key1': 'val1',", "attrs = {'master.return_value': mock_spark_session_builder, 'appName.return_value': mock_spark_session_builder} mock_spark_session_builder.configure_mock(**attrs) get_or_generate_spark_session(mock_spark_session_builder, test_map, test_master_url, test_app_name) mock_spark_session_builder.master.assert_called_with(test_master_url) mock_spark_session_builder.appName.assert_called_with(test_app_name)", "test_app_name = \"TEST_APP\" mock_spark_session_builder = Mock() # we have to do this since", "'appName.return_value': mock_spark_session_builder} mock_spark_session_builder.configure_mock(**attrs) get_or_generate_spark_session(mock_spark_session_builder, test_map, test_master_url, test_app_name) mock_spark_session_builder.master.assert_called_with(test_master_url) mock_spark_session_builder.appName.assert_called_with(test_app_name) self.assertEqual(len(test_map), mock_spark_session_builder.config.call_count) mock_spark_session_builder.getOrCreate.assert_called_once() if", "test_map, test_master_url, test_app_name) mock_spark_session_builder.master.assert_called_with(test_master_url) mock_spark_session_builder.appName.assert_called_with(test_app_name) self.assertEqual(len(test_map), mock_spark_session_builder.config.call_count) mock_spark_session_builder.getOrCreate.assert_called_once() if __name__ == '__main__': unittest.main()", "the builder pattern when creating the SparkSession attrs = 
{'master.return_value': mock_spark_session_builder, 'appName.return_value': mock_spark_session_builder}", "parse_configs, get_or_generate_spark_session class TestAppProcessor(unittest.TestCase): def test_parse_configs(self): conf = parse_configs(\"test-conf.yml\") self.assertEqual('value1', conf['key1']) self.assertEqual('value2', conf['key2'])", "self.assertEqual('value2', conf['key2']) self.assertEqual(2, len(conf)) def test_get_or_generate_spark_session(self): test_map = {'key1': 'val1', 'key2': 'val2'} test_master_url", "test_master_url = \"TEST1\" test_app_name = \"TEST_APP\" mock_spark_session_builder = Mock() # we have to", "= {'key1': 'val1', 'key2': 'val2'} test_master_url = \"TEST1\" test_app_name = \"TEST_APP\" mock_spark_session_builder =", "mock_spark_session_builder = Mock() # we have to do this since we use the", "= {'master.return_value': mock_spark_session_builder, 'appName.return_value': mock_spark_session_builder} mock_spark_session_builder.configure_mock(**attrs) get_or_generate_spark_session(mock_spark_session_builder, test_map, test_master_url, test_app_name) mock_spark_session_builder.master.assert_called_with(test_master_url) mock_spark_session_builder.appName.assert_called_with(test_app_name) self.assertEqual(len(test_map),", "\"TEST_APP\" mock_spark_session_builder = Mock() # we have to do this since we use", "import Mock from app_processor.pyspark_hb_app_processor import parse_configs, get_or_generate_spark_session class TestAppProcessor(unittest.TestCase): def test_parse_configs(self): conf =", "from unittest.mock import Mock from app_processor.pyspark_hb_app_processor import parse_configs, get_or_generate_spark_session class TestAppProcessor(unittest.TestCase): def test_parse_configs(self):" ]
[ "check_validation_ID_Post import check_validation from driver_chrome import * from DataScrapting import * tracking_ID =", "i,j in zip( [i for i in dst_lst if not dst_lst.index(i)%2], [i for", "j.text) for i,j in zip( [i for i in dst_lst if not dst_lst.index(i)%2],", ": ghostbusters(f\"\\n {warning.text}\") else: daemon(\"از سمت شما ترافیک بالایی سمت سرویس های ما", "digit): \") check_validation(tracking_ID) URL = f\"https://tracking.post.ir/?id={tracking_ID}&client=app\" driver = driverChomre(URL) page_source = PageSource(driver) soup", "print(f\"{dst[1]}\") print(\"========================================================================\") elif warning != None : ghostbusters(f\"\\n {warning.text}\") else: daemon(\"از سمت شما", "سمت شما ترافیک بالایی سمت سرویس های ما ارسال می شود! لطفا چند", "dst_lst if not dst_lst.index(i)%2], [i for i in dst_lst if dst_lst.index(i)%2] ) ]", "enumerate(new_lst): print(f\"\\t\\t\\t{i+1}\\n\") print(f\"{dst[0]}\\n\") print(f\"{dst[1]}\") print(\"========================================================================\") elif warning != None : ghostbusters(f\"\\n {warning.text}\") else:", "ghostbusters(f\"\\n {warning.text}\") else: daemon(\"از سمت شما ترافیک بالایی سمت سرویس های ما ارسال", "] new_lst.reverse() print(\"\\n*******************************************************************\") for i,dst in enumerate(new_lst): print(f\"\\t\\t\\t{i+1}\\n\") print(f\"{dst[0]}\\n\") print(f\"{dst[1]}\") print(\"========================================================================\") elif warning", "driverChomre(URL) page_source = PageSource(driver) soup = mining(page_source) warning = soup.warning() security = soup.security()", "check_validation from driver_chrome import * from DataScrapting import * tracking_ID = getpass(\"Enter Your", "f\"https://tracking.post.ir/?id={tracking_ID}&client=app\" driver = driverChomre(URL) page_source = PageSource(driver) soup = mining(page_source) warning = soup.warning()", "from DataScrapting import * 
tracking_ID = getpass(\"Enter Your Post Id(24 digit): \") check_validation(tracking_ID)", "dst_lst if dst_lst.index(i)%2] ) ] new_lst.reverse() print(\"\\n*******************************************************************\") for i,dst in enumerate(new_lst): print(f\"\\t\\t\\t{i+1}\\n\") print(f\"{dst[0]}\\n\")", "soup.FindAll() new_lst = [(i.text, j.text) for i,j in zip( [i for i in", "import * from DataScrapting import * tracking_ID = getpass(\"Enter Your Post Id(24 digit):", "if warning == None and security == None : dst_lst = soup.FindAll() new_lst", "dst_lst.index(i)%2], [i for i in dst_lst if dst_lst.index(i)%2] ) ] new_lst.reverse() print(\"\\n*******************************************************************\") for", "daemon, ghostbusters, kitty from check_validation_ID_Post import check_validation from driver_chrome import * from DataScrapting", "= driverChomre(URL) page_source = PageSource(driver) soup = mining(page_source) warning = soup.warning() security =", "import * tracking_ID = getpass(\"Enter Your Post Id(24 digit): \") check_validation(tracking_ID) URL =", "if dst_lst.index(i)%2] ) ] new_lst.reverse() print(\"\\n*******************************************************************\") for i,dst in enumerate(new_lst): print(f\"\\t\\t\\t{i+1}\\n\") print(f\"{dst[0]}\\n\") print(f\"{dst[1]}\")", "import getpass from cowsay import daemon, ghostbusters, kitty from check_validation_ID_Post import check_validation from", "print(\"\\n*******************************************************************\") for i,dst in enumerate(new_lst): print(f\"\\t\\t\\t{i+1}\\n\") print(f\"{dst[0]}\\n\") print(f\"{dst[1]}\") print(\"========================================================================\") elif warning != None", "new_lst = [(i.text, j.text) for i,j in zip( [i for i in dst_lst", "and security == None : dst_lst = soup.FindAll() new_lst = [(i.text, j.text) for", "warning == None and security == None : dst_lst = soup.FindAll() new_lst =", "for i in dst_lst 
if not dst_lst.index(i)%2], [i for i in dst_lst if", "soup.security() if warning == None and security == None : dst_lst = soup.FindAll()", "soup.warning() security = soup.security() if warning == None and security == None :", "= PageSource(driver) soup = mining(page_source) warning = soup.warning() security = soup.security() if warning", "= mining(page_source) warning = soup.warning() security = soup.security() if warning == None and", "getpass(\"Enter Your Post Id(24 digit): \") check_validation(tracking_ID) URL = f\"https://tracking.post.ir/?id={tracking_ID}&client=app\" driver = driverChomre(URL)", "DataScrapting import * tracking_ID = getpass(\"Enter Your Post Id(24 digit): \") check_validation(tracking_ID) URL", "from check_validation_ID_Post import check_validation from driver_chrome import * from DataScrapting import * tracking_ID", "i,dst in enumerate(new_lst): print(f\"\\t\\t\\t{i+1}\\n\") print(f\"{dst[0]}\\n\") print(f\"{dst[1]}\") print(\"========================================================================\") elif warning != None : ghostbusters(f\"\\n", "= getpass(\"Enter Your Post Id(24 digit): \") check_validation(tracking_ID) URL = f\"https://tracking.post.ir/?id={tracking_ID}&client=app\" driver =", "for i,dst in enumerate(new_lst): print(f\"\\t\\t\\t{i+1}\\n\") print(f\"{dst[0]}\\n\") print(f\"{dst[1]}\") print(\"========================================================================\") elif warning != None :", "warning != None : ghostbusters(f\"\\n {warning.text}\") else: daemon(\"از سمت شما ترافیک بالایی سمت", "Post Id(24 digit): \") check_validation(tracking_ID) URL = f\"https://tracking.post.ir/?id={tracking_ID}&client=app\" driver = driverChomre(URL) page_source =", "\") check_validation(tracking_ID) URL = f\"https://tracking.post.ir/?id={tracking_ID}&client=app\" driver = driverChomre(URL) page_source = PageSource(driver) soup =", "print(f\"\\t\\t\\t{i+1}\\n\") print(f\"{dst[0]}\\n\") print(f\"{dst[1]}\") 
print(\"========================================================================\") elif warning != None : ghostbusters(f\"\\n {warning.text}\") else: daemon(\"از", "warning = soup.warning() security = soup.security() if warning == None and security ==", "PageSource(driver) soup = mining(page_source) warning = soup.warning() security = soup.security() if warning ==", "== None and security == None : dst_lst = soup.FindAll() new_lst = [(i.text,", "soup = mining(page_source) warning = soup.warning() security = soup.security() if warning == None", "شما ترافیک بالایی سمت سرویس های ما ارسال می شود! لطفا چند دقیقه", "URL = f\"https://tracking.post.ir/?id={tracking_ID}&client=app\" driver = driverChomre(URL) page_source = PageSource(driver) soup = mining(page_source) warning", "= soup.warning() security = soup.security() if warning == None and security == None", "getpass from cowsay import daemon, ghostbusters, kitty from check_validation_ID_Post import check_validation from driver_chrome", "print(f\"{dst[0]}\\n\") print(f\"{dst[1]}\") print(\"========================================================================\") elif warning != None : ghostbusters(f\"\\n {warning.text}\") else: daemon(\"از سمت", "ترافیک بالایی سمت سرویس های ما ارسال می شود! لطفا چند دقیقه دیگر", "بالایی سمت سرویس های ما ارسال می شود! 
لطفا چند دقیقه دیگر امتحان", "= soup.security() if warning == None and security == None : dst_lst =", "for i,j in zip( [i for i in dst_lst if not dst_lst.index(i)%2], [i", "security == None : dst_lst = soup.FindAll() new_lst = [(i.text, j.text) for i,j", "{warning.text}\") else: daemon(\"از سمت شما ترافیک بالایی سمت سرویس های ما ارسال می", ") ] new_lst.reverse() print(\"\\n*******************************************************************\") for i,dst in enumerate(new_lst): print(f\"\\t\\t\\t{i+1}\\n\") print(f\"{dst[0]}\\n\") print(f\"{dst[1]}\") print(\"========================================================================\") elif", "Id(24 digit): \") check_validation(tracking_ID) URL = f\"https://tracking.post.ir/?id={tracking_ID}&client=app\" driver = driverChomre(URL) page_source = PageSource(driver)", "if not dst_lst.index(i)%2], [i for i in dst_lst if dst_lst.index(i)%2] ) ] new_lst.reverse()", "[(i.text, j.text) for i,j in zip( [i for i in dst_lst if not", "kitty from check_validation_ID_Post import check_validation from driver_chrome import * from DataScrapting import *", "= f\"https://tracking.post.ir/?id={tracking_ID}&client=app\" driver = driverChomre(URL) page_source = PageSource(driver) soup = mining(page_source) warning =", "in zip( [i for i in dst_lst if not dst_lst.index(i)%2], [i for i", "ghostbusters, kitty from check_validation_ID_Post import check_validation from driver_chrome import * from DataScrapting import", "security = soup.security() if warning == None and security == None : dst_lst", "mining(page_source) warning = soup.warning() security = soup.security() if warning == None and security", "[i for i in dst_lst if dst_lst.index(i)%2] ) ] new_lst.reverse() print(\"\\n*******************************************************************\") for i,dst", "in dst_lst if dst_lst.index(i)%2] ) ] new_lst.reverse() print(\"\\n*******************************************************************\") for i,dst in enumerate(new_lst): 
print(f\"\\t\\t\\t{i+1}\\n\")", "from stdiomask import getpass from cowsay import daemon, ghostbusters, kitty from check_validation_ID_Post import", "in dst_lst if not dst_lst.index(i)%2], [i for i in dst_lst if dst_lst.index(i)%2] )", "print(\"========================================================================\") elif warning != None : ghostbusters(f\"\\n {warning.text}\") else: daemon(\"از سمت شما ترافیک", "page_source = PageSource(driver) soup = mining(page_source) warning = soup.warning() security = soup.security() if", "elif warning != None : ghostbusters(f\"\\n {warning.text}\") else: daemon(\"از سمت شما ترافیک بالایی", "tracking_ID = getpass(\"Enter Your Post Id(24 digit): \") check_validation(tracking_ID) URL = f\"https://tracking.post.ir/?id={tracking_ID}&client=app\" driver", "Your Post Id(24 digit): \") check_validation(tracking_ID) URL = f\"https://tracking.post.ir/?id={tracking_ID}&client=app\" driver = driverChomre(URL) page_source", "dst_lst = soup.FindAll() new_lst = [(i.text, j.text) for i,j in zip( [i for", "* tracking_ID = getpass(\"Enter Your Post Id(24 digit): \") check_validation(tracking_ID) URL = f\"https://tracking.post.ir/?id={tracking_ID}&client=app\"", "None : ghostbusters(f\"\\n {warning.text}\") else: daemon(\"از سمت شما ترافیک بالایی سمت سرویس های", "in enumerate(new_lst): print(f\"\\t\\t\\t{i+1}\\n\") print(f\"{dst[0]}\\n\") print(f\"{dst[1]}\") print(\"========================================================================\") elif warning != None : ghostbusters(f\"\\n {warning.text}\")", "driver = driverChomre(URL) page_source = PageSource(driver) soup = mining(page_source) warning = soup.warning() security", "import check_validation from driver_chrome import * from DataScrapting import * tracking_ID = getpass(\"Enter", "سمت سرویس های ما ارسال می شود! 
لطفا چند دقیقه دیگر امتحان کنید.\")", "* from DataScrapting import * tracking_ID = getpass(\"Enter Your Post Id(24 digit): \")", "driver_chrome import * from DataScrapting import * tracking_ID = getpass(\"Enter Your Post Id(24", "cowsay import daemon, ghostbusters, kitty from check_validation_ID_Post import check_validation from driver_chrome import *", ": dst_lst = soup.FindAll() new_lst = [(i.text, j.text) for i,j in zip( [i", "= soup.FindAll() new_lst = [(i.text, j.text) for i,j in zip( [i for i", "check_validation(tracking_ID) URL = f\"https://tracking.post.ir/?id={tracking_ID}&client=app\" driver = driverChomre(URL) page_source = PageSource(driver) soup = mining(page_source)", "!= None : ghostbusters(f\"\\n {warning.text}\") else: daemon(\"از سمت شما ترافیک بالایی سمت سرویس", "import daemon, ghostbusters, kitty from check_validation_ID_Post import check_validation from driver_chrome import * from", "i in dst_lst if not dst_lst.index(i)%2], [i for i in dst_lst if dst_lst.index(i)%2]", "for i in dst_lst if dst_lst.index(i)%2] ) ] new_lst.reverse() print(\"\\n*******************************************************************\") for i,dst in", "[i for i in dst_lst if not dst_lst.index(i)%2], [i for i in dst_lst", "daemon(\"از سمت شما ترافیک بالایی سمت سرویس های ما ارسال می شود! 
لطفا", "from driver_chrome import * from DataScrapting import * tracking_ID = getpass(\"Enter Your Post", "new_lst.reverse() print(\"\\n*******************************************************************\") for i,dst in enumerate(new_lst): print(f\"\\t\\t\\t{i+1}\\n\") print(f\"{dst[0]}\\n\") print(f\"{dst[1]}\") print(\"========================================================================\") elif warning !=", "None : dst_lst = soup.FindAll() new_lst = [(i.text, j.text) for i,j in zip(", "else: daemon(\"از سمت شما ترافیک بالایی سمت سرویس های ما ارسال می شود!", "i in dst_lst if dst_lst.index(i)%2] ) ] new_lst.reverse() print(\"\\n*******************************************************************\") for i,dst in enumerate(new_lst):", "None and security == None : dst_lst = soup.FindAll() new_lst = [(i.text, j.text)", "= [(i.text, j.text) for i,j in zip( [i for i in dst_lst if", "zip( [i for i in dst_lst if not dst_lst.index(i)%2], [i for i in", "== None : dst_lst = soup.FindAll() new_lst = [(i.text, j.text) for i,j in", "not dst_lst.index(i)%2], [i for i in dst_lst if dst_lst.index(i)%2] ) ] new_lst.reverse() print(\"\\n*******************************************************************\")", "from cowsay import daemon, ghostbusters, kitty from check_validation_ID_Post import check_validation from driver_chrome import", "dst_lst.index(i)%2] ) ] new_lst.reverse() print(\"\\n*******************************************************************\") for i,dst in enumerate(new_lst): print(f\"\\t\\t\\t{i+1}\\n\") print(f\"{dst[0]}\\n\") print(f\"{dst[1]}\") print(\"========================================================================\")", "stdiomask import getpass from cowsay import daemon, ghostbusters, kitty from check_validation_ID_Post import check_validation" ]